Dataset schema (one row per source file; column, dtype, and observed range):

| Column | Dtype | Observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
---
blob_id: cd925eff3cbd9ad5d9a16f17279d7cab57166259
directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad
path: /homeassistant/components/concord232/alarm_control_panel.py
content_id: de5d4495a85e3f7321f8aed3c039c22d969e5ab9
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: home-assistant/core
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f
revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
branch_name: refs/heads/dev
visit_date: 2023-08-31T15:41:06.299469
revision_date: 2023-08-31T14:50:53
committer_date: 2023-08-31T14:50:53
github_id: 12,888,993
star_events_count: 35,501
fork_events_count: 20,617
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:50:15
gha_created_at: 2013-09-17T07:29:48
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,646
extension: py
filename: alarm_control_panel.py
content:
"""Support for Concord232 alarm control panels."""
from __future__ import annotations
import datetime
import logging
from concord232 import client as concord232_client
import requests
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
AlarmControlPanelEntityFeature,
)
from homeassistant.const import (
CONF_CODE,
CONF_HOST,
CONF_MODE,
CONF_NAME,
CONF_PORT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "CONCORD232"
DEFAULT_PORT = 5007
DEFAULT_MODE = "audible"
SCAN_INTERVAL = datetime.timedelta(seconds=10)
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_MODE, default=DEFAULT_MODE): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
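# A minimal example configuration.yaml entry for this platform (a sketch;
# the host, port, and code values are hypothetical, and omitted options fall
# back to the defaults declared above):
#
#   alarm_control_panel:
#     - platform: concord232
#       host: 192.168.1.10
#       port: 5007
#       code: "1234"
#       mode: audible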
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Concord232 alarm control panel platform."""
name = config[CONF_NAME]
code = config.get(CONF_CODE)
mode = config[CONF_MODE]
host = config[CONF_HOST]
port = config[CONF_PORT]
url = f"http://{host}:{port}"
try:
add_entities([Concord232Alarm(url, name, code, mode)], True)
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to Concord232: %s", str(ex))
class Concord232Alarm(alarm.AlarmControlPanelEntity):
"""Representation of the Concord232-based alarm panel."""
_attr_code_format = alarm.CodeFormat.NUMBER
_attr_state: str | None
_attr_supported_features = (
AlarmControlPanelEntityFeature.ARM_HOME
| AlarmControlPanelEntityFeature.ARM_AWAY
)
def __init__(self, url, name, code, mode):
"""Initialize the Concord232 alarm panel."""
self._attr_name = name
self._code = code
self._mode = mode
self._url = url
self._alarm = concord232_client.Client(self._url)
self._alarm.partitions = self._alarm.list_partitions()
def update(self) -> None:
"""Update values from API."""
try:
part = self._alarm.list_partitions()[0]
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to %(host)s: %(reason)s",
{"host": self._url, "reason": ex},
)
return
except IndexError:
_LOGGER.error("Concord232 reports no partitions")
return
if part["arming_level"] == "Off":
self._attr_state = STATE_ALARM_DISARMED
elif "Home" in part["arming_level"]:
self._attr_state = STATE_ALARM_ARMED_HOME
else:
self._attr_state = STATE_ALARM_ARMED_AWAY
def alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
if not self._validate_code(code, STATE_ALARM_DISARMED):
return
self._alarm.disarm(code)
def alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
if not self._validate_code(code, STATE_ALARM_ARMED_HOME):
return
if self._mode == "silent":
self._alarm.arm("stay", "silent")
else:
self._alarm.arm("stay")
def alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
if not self._validate_code(code, STATE_ALARM_ARMED_AWAY):
return
self._alarm.arm("away")
def _validate_code(self, code, state):
"""Validate given code."""
if self._code is None:
return True
if isinstance(self._code, str):
alarm_code = self._code
else:
alarm_code = self._code.render(from_state=self._attr_state, to_state=state)
check = not alarm_code or code == alarm_code
if not check:
_LOGGER.warning("Invalid code given for %s", state)
return check
---
blob_id: 04e9ff7b339f6814b4e8af949e8c420269cff122
directory_id: b049a961f100444dde14599bab06a0a4224d869b
path: /sdk/python/pulumi_azure_native/datadog/__init__.py
content_id: b2fae64eecb8d38abc136a32e4bdd4e750d3024d
detected_licenses: ["BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: pulumi/pulumi-azure-native
snapshot_id: b390c88beef8381f9a71ab2bed5571e0dd848e65
revision_id: 4c499abe17ec6696ce28477dde1157372896364e
branch_name: refs/heads/master
visit_date: 2023-08-30T08:19:41.564780
revision_date: 2023-08-28T19:29:04
committer_date: 2023-08-28T19:29:04
github_id: 172,386,632
star_events_count: 107
fork_events_count: 29
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:17:00
gha_created_at: 2019-02-24T20:30:21
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 790
extension: py
filename: __init__.py
content:
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_monitor import *
from .get_monitor_default_key import *
from .list_monitor_api_keys import *
from .list_monitor_hosts import *
from .list_monitor_linked_resources import *
from .list_monitor_monitored_resources import *
from .monitor import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.datadog.v20220601 as __v20220601
v20220601 = __v20220601
else:
v20220601 = _utilities.lazy_import('pulumi_azure_native.datadog.v20220601')
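# For reference, `lazy_import` defers the real module import until first
# attribute access. A minimal sketch of the same idea using only the stdlib
# (illustrative only; this is not pulumi's actual implementation):
#
#   import importlib.util
#   import sys
#
#   def lazy_import(fullname):
#       if fullname in sys.modules:
#           return sys.modules[fullname]
#       spec = importlib.util.find_spec(fullname)
#       loader = importlib.util.LazyLoader(spec.loader)
#       spec.loader = loader
#       module = importlib.util.module_from_spec(spec)
#       sys.modules[fullname] = module
#       loader.exec_module(module)
#       return module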
---
blob_id: 9f642f65537d7987c37a58f30355132bb66140ce
directory_id: b347bc4b850dee4a8a9a171b563a3f31230ce1c7
path: /sktime/forecasting/trend.py
content_id: a1168dcfc9059fe84bf5f9a02977dee3d7799016
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: sktime/sktime
snapshot_id: 5963962df338c5931a2f9f1794d1203c50ddc27e
revision_id: 70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
branch_name: refs/heads/main
visit_date: 2023-08-22T18:20:08.022950
revision_date: 2023-08-22T15:24:39
committer_date: 2023-08-22T15:24:39
github_id: 156,401,841
star_events_count: 1,117
fork_events_count: 268
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T20:44:21
gha_created_at: 2018-11-06T15:08:24
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 24,799
extension: py
filename: trend.py
content:
#!/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements trend based forecasters."""
__author__ = ["tensorflow-as-tf", "mloning", "aiwalter", "fkiraly"]
__all__ = ["TrendForecaster", "PolynomialTrendForecaster", "STLForecaster"]
import pandas as pd
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sktime.forecasting.base import BaseForecaster
def _get_X_numpy_int_from_pandas(x):
"""Convert pandas index to an sklearn compatible X, 2D np.ndarray, int type."""
    if isinstance(x, pd.DatetimeIndex):
x = x.astype("int64") / 864e11
else:
x = x.astype("int64")
return x.to_numpy().reshape(-1, 1)
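# Example of the conversion above (a sketch): pd.DatetimeIndex(["1970-01-02"])
# holds the int64 value 86_400_000_000_000 ns, and 864e11 ns is one day, so it
# maps to np.array([[1.0]]); a pd.PeriodIndex maps to whole period counts
# since the start of 1970 instead.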
class TrendForecaster(BaseForecaster):
r"""Trend based forecasts of time series data, regressing values on index.
Uses a `sklearn` regressor `regressor` to regress values of time series on index:
In `fit`, for input time series :math:`(v_i, t_i), i = 1, \dots, T`,
where :math:`v_i` are values and :math:`t_i` are time stamps,
fits an `sklearn` model :math:`v_i = f(t_i) + \epsilon_i`, where `f` is
the model fitted when `regressor.fit` is passed `X` = vector of :math:`t_i`,
and `y` = vector of :math:`v_i`.
In `predict`, for a new time point :math:`t_*`, predicts :math:`f(t_*)`,
where :math:`f` is the function as fitted above in `fit`.
Default for `regressor` is linear regression = `sklearn` `LinearRegression` default.
If time stamps are `pd.DatetimeIndex`, fitted coefficients are in units
of days since start of 1970. If time stamps are `pd.PeriodIndex`,
coefficients are in units of (full) periods since start of 1970.
Parameters
----------
regressor : estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import TrendForecaster
>>> y = load_airline()
>>> forecaster = TrendForecaster()
>>> forecaster.fit(y)
TrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None):
# for default regressor, set fit_intercept=True
self.regressor = regressor
super().__init__()
def _fit(self, y, X, fh):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
if self.regressor is None:
self.regressor_ = LinearRegression(fit_intercept=True)
else:
self.regressor_ = clone(self.regressor)
# we regress index on series values
# the sklearn X is obtained from the index of y
# the sklearn y can be taken as the y seen here
X_sklearn = _get_X_numpy_int_from_pandas(y.index)
# fit regressor
self.regressor_.fit(X_sklearn, y)
return self
def _predict(self, fh=None, X=None):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_index(self.cutoff)
X_sklearn = _get_X_numpy_int_from_pandas(fh)
y_pred_sklearn = self.regressor_.predict(X_sklearn)
y_pred = pd.Series(y_pred_sklearn, index=fh)
y_pred.name = self._y.name
return y_pred
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
            Each dict contains parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
from sklearn.ensemble import RandomForestRegressor
params_list = [{}, {"regressor": RandomForestRegressor()}]
return params_list
class PolynomialTrendForecaster(BaseForecaster):
r"""Forecast time series data with a polynomial trend.
Uses a `sklearn` regressor `regressor` to regress values of time series on index,
after extraction of polynomial features.
    Same as `TrendForecaster`, where `regressor` is pipelined with the transformation step
`PolynomialFeatures(degree, with_intercept)` applied to time, at the start.
In `fit`, for input time series :math:`(v_i, p(t_i)), i = 1, \dots, T`,
where :math:`v_i` are values, :math:`t_i` are time stamps,
and :math:`p` is the polynomial feature transform with degree `degree`,
and with/without intercept depending on `with_intercept`,
fits an `sklearn` model :math:`v_i = f(p(t_i)) + \epsilon_i`, where `f` is
the model fitted when `regressor.fit` is passed `X` = vector of :math:`p(t_i)`,
and `y` = vector of :math:`v_i`.
In `predict`, for a new time point :math:`t_*`, predicts :math:`f(p(t_*))`,
where :math:`f` is the function as fitted above in `fit`,
and :math:`p` is the same polynomial feature transform as above.
Default for `regressor` is linear regression = `sklearn` `LinearRegression` default.
If time stamps are `pd.DatetimeIndex`, fitted coefficients are in units
of days since start of 1970. If time stamps are `pd.PeriodIndex`,
coefficients are in units of (full) periods since start of 1970.
Parameters
----------
regressor : sklearn regressor estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
degree : int, default = 1
Degree of polynomial function
with_intercept : bool, default=True
If true, then include a feature in which all polynomial powers are
zero. (i.e. a column of ones, acts as an intercept term in a linear
model)
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import PolynomialTrendForecaster
>>> y = load_airline()
>>> forecaster = PolynomialTrendForecaster(degree=1)
>>> forecaster.fit(y)
PolynomialTrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None, degree=1, with_intercept=True):
self.regressor = regressor
self.degree = degree
self.with_intercept = with_intercept
self.regressor_ = self.regressor
super().__init__()
def _fit(self, y, X, fh):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, default=None
            The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
# for default regressor, set fit_intercept=False as we generate a
# dummy variable in polynomial features
if self.regressor is None:
regressor = LinearRegression(fit_intercept=False)
else:
regressor = clone(self.regressor)
# make pipeline with polynomial features
self.regressor_ = make_pipeline(
PolynomialFeatures(degree=self.degree, include_bias=self.with_intercept),
regressor,
)
# we regress index on series values
# the sklearn X is obtained from the index of y
# the sklearn y can be taken as the y seen here
X_sklearn = _get_X_numpy_int_from_pandas(y.index)
# fit regressor
self.regressor_.fit(X_sklearn, y)
return self
def _predict(self, fh=None, X=None):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_index(self.cutoff)
X_sklearn = _get_X_numpy_int_from_pandas(fh)
y_pred_sklearn = self.regressor_.predict(X_sklearn)
y_pred = pd.Series(y_pred_sklearn, index=fh)
y_pred.name = self._y.name
return y_pred
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
            Each dict contains parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
from sklearn.ensemble import RandomForestRegressor
params_list = [
{},
{
"regressor": RandomForestRegressor(),
"degree": 2,
"with_intercept": False,
},
]
return params_list
class STLForecaster(BaseForecaster):
"""Implements STLForecaster based on statsmodels.tsa.seasonal.STL implementation.
The STLForecaster applies the following algorithm, also see [1]_.
in `fit`:
1. use `statsmodels` `STL` [2]_ to decompose the given series `y` into
the three components: `trend`, `season` and `residuals`.
2. fit clones of `forecaster_trend` to `trend`, `forecaster_seasonal` to `season`,
and `forecaster_resid` to `residuals`, using `y`, `X`, `fh` from `fit`.
The forecasters are fitted as clones, stored in the attributes
`forecaster_trend_`, `forecaster_seasonal_`, `forecaster_resid_`.
In `predict`, forecasts as follows:
1. obtain forecasts `y_pred_trend` from `forecaster_trend_`,
`y_pred_seasonal` from `forecaster_seasonal_`, and
`y_pred_residual` from `forecaster_resid_`, using `X`, `fh`, from `predict`.
2. recompose `y_pred` as `y_pred = y_pred_trend + y_pred_seasonal + y_pred_residual`
3. return `y_pred`
`update` refits entirely, i.e., behaves as `fit` on all data seen so far.
Parameters
----------
    sp : int, optional, default=2. Passed to `statsmodels` `STL`.
        Length of the seasonal period, passed to `statsmodels` `STL` and to the
        default forecasters (forecaster_seasonal, forecaster_resid) when these
        are None. The default forecaster_trend does not get sp, as the trend is
        independent of seasonality.
seasonal : int, optional., default=7. Passed to `statsmodels` `STL`.
Length of the seasonal smoother. Must be an odd integer >=3, and should
normally be >= 7 (default).
trend : {int, None}, optional, default=None. Passed to `statsmodels` `STL`.
Length of the trend smoother. Must be an odd integer. If not provided
uses the smallest odd integer greater than
1.5 * period / (1 - 1.5 / seasonal), following the suggestion in
the original implementation.
low_pass : {int, None}, optional, default=None. Passed to `statsmodels` `STL`.
Length of the low-pass filter. Must be an odd integer >=3. If not
provided, uses the smallest odd integer > period.
seasonal_deg : int, optional, default=1. Passed to `statsmodels` `STL`.
Degree of seasonal LOESS. 0 (constant) or 1 (constant and trend).
trend_deg : int, optional, default=1. Passed to `statsmodels` `STL`.
Degree of trend LOESS. 0 (constant) or 1 (constant and trend).
low_pass_deg : int, optional, default=1. Passed to `statsmodels` `STL`.
Degree of low pass LOESS. 0 (constant) or 1 (constant and trend).
robust : bool, optional, default=False. Passed to `statsmodels` `STL`.
Flag indicating whether to use a weighted version that is robust to
some forms of outliers.
seasonal_jump : int, optional, default=1. Passed to `statsmodels` `STL`.
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every seasonal_jump points and linear
interpolation is between fitted points. Higher values reduce
estimation time.
trend_jump : int, optional, default=1. Passed to `statsmodels` `STL`.
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every trend_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
low_pass_jump : int, optional, default=1. Passed to `statsmodels` `STL`.
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every low_pass_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
inner_iter: int or None, optional, default=None. Passed to `statsmodels` `STL`.
Number of iterations to perform in the inner loop. If not provided uses 2 if
robust is True, or 5 if not. This param goes into STL.fit() from statsmodels.
outer_iter: int or None, optional, default=None. Passed to `statsmodels` `STL`.
Number of iterations to perform in the outer loop. If not provided uses 15 if
robust is True, or 0 if not. This param goes into STL.fit() from statsmodels.
forecaster_trend : sktime forecaster, optional
Forecaster to be fitted on trend_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="drift") is used.
forecaster_seasonal : sktime forecaster, optional
Forecaster to be fitted on seasonal_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="last") is used.
forecaster_resid : sktime forecaster, optional
Forecaster to be fitted on resid_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="mean") is used.
Attributes
----------
trend_ : pd.Series
Trend component.
seasonal_ : pd.Series
Seasonal component.
resid_ : pd.Series
Residuals component.
forecaster_trend_ : sktime forecaster
Fitted trend forecaster.
forecaster_seasonal_ : sktime forecaster
Fitted seasonal forecaster.
forecaster_resid_ : sktime forecaster
Fitted residual forecaster.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import STLForecaster
>>> y = load_airline()
>>> forecaster = STLForecaster(sp=12) # doctest: +SKIP
>>> forecaster.fit(y) # doctest: +SKIP
STLForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3]) # doctest: +SKIP
See Also
--------
Deseasonalizer
Detrender
References
----------
.. [1] R. B. Cleveland, W. S. Cleveland, J.E. McRae, and I. Terpenning (1990)
STL: A Seasonal-Trend Decomposition Procedure Based on LOESS.
Journal of Official Statistics, 6, 3-73.
.. [2] https://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.STL.html
"""
_tags = {
"scitype:y": "univariate", # which y are fine? univariate/multivariate/both
"ignores-exogeneous-X": False, # does estimator ignore the exogeneous X?
"handles-missing-data": False, # can estimator handle missing data?
"y_inner_mtype": "pd.Series", # which types do _fit, _predict, assume for y?
"X_inner_mtype": "pd.DataFrame", # which types do _fit, _predict, assume for X?
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
"python_dependencies": "statsmodels",
}
def __init__(
self,
sp=2,
seasonal=7,
trend=None,
low_pass=None,
seasonal_deg=1,
trend_deg=1,
low_pass_deg=1,
robust=False,
seasonal_jump=1,
trend_jump=1,
low_pass_jump=1,
inner_iter=None,
outer_iter=None,
forecaster_trend=None,
forecaster_seasonal=None,
forecaster_resid=None,
):
self.sp = sp
self.seasonal = seasonal
self.trend = trend
self.low_pass = low_pass
self.seasonal_deg = seasonal_deg
self.trend_deg = trend_deg
self.low_pass_deg = low_pass_deg
self.robust = robust
self.seasonal_jump = seasonal_jump
self.trend_jump = trend_jump
self.low_pass_jump = low_pass_jump
self.inner_iter = inner_iter
self.outer_iter = outer_iter
self.forecaster_trend = forecaster_trend
self.forecaster_seasonal = forecaster_seasonal
self.forecaster_resid = forecaster_resid
super().__init__()
def _fit(self, y, X, fh):
"""Fit forecaster to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Returns
-------
self : returns an instance of self.
"""
from statsmodels.tsa.seasonal import STL as _STL
from sktime.forecasting.naive import NaiveForecaster
self._stl = _STL(
y.values,
period=self.sp,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit(inner_iter=self.inner_iter, outer_iter=self.outer_iter)
self.seasonal_ = pd.Series(self._stl.seasonal, index=y.index)
self.resid_ = pd.Series(self._stl.resid, index=y.index)
self.trend_ = pd.Series(self._stl.trend, index=y.index)
self.forecaster_seasonal_ = (
NaiveForecaster(sp=self.sp, strategy="last")
if self.forecaster_seasonal is None
else self.forecaster_seasonal.clone()
)
# trend forecaster does not need sp
self.forecaster_trend_ = (
NaiveForecaster(strategy="drift")
if self.forecaster_trend is None
else self.forecaster_trend.clone()
)
self.forecaster_resid_ = (
NaiveForecaster(sp=self.sp, strategy="mean")
if self.forecaster_resid is None
else self.forecaster_resid.clone()
)
# fitting forecasters to different components
self.forecaster_seasonal_.fit(y=self.seasonal_, X=X, fh=fh)
self.forecaster_trend_.fit(y=self.trend_, X=X, fh=fh)
        self.forecaster_resid_.fit(y=self.resid_, X=X, fh=fh)
        return self
def _predict(self, fh, X):
"""Forecast time series at future horizon.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon
X : pd.DataFrame, optional (default=None)
Exogenous time series
Returns
-------
y_pred : pd.Series
Point predictions
"""
y_pred_seasonal = self.forecaster_seasonal_.predict(fh=fh, X=X)
y_pred_trend = self.forecaster_trend_.predict(fh=fh, X=X)
y_pred_resid = self.forecaster_resid_.predict(fh=fh, X=X)
y_pred = y_pred_seasonal + y_pred_trend + y_pred_resid
y_pred.name = self._y.name
return y_pred
def _update(self, y, X=None, update_params=True):
"""Update cutoff value and, optionally, fitted parameters.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the forecaster.
X : pd.DataFrame, optional (default=None)
Exogeneous data
update_params : bool, optional (default=True)
whether model parameters should be updated
Returns
-------
self : reference to self
"""
from statsmodels.tsa.seasonal import STL as _STL
self._stl = _STL(
y.values,
period=self.sp,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit(inner_iter=self.inner_iter, outer_iter=self.outer_iter)
self.seasonal_ = pd.Series(self._stl.seasonal, index=y.index)
self.resid_ = pd.Series(self._stl.resid, index=y.index)
self.trend_ = pd.Series(self._stl.trend, index=y.index)
self.forecaster_seasonal_.update(
y=self.seasonal_, X=X, update_params=update_params
)
self.forecaster_trend_.update(y=self.trend_, X=X, update_params=update_params)
self.forecaster_resid_.update(y=self.resid_, X=X, update_params=update_params)
return self
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
            Each dict contains parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
from sktime.forecasting.naive import NaiveForecaster
params_list = [
{},
{
"sp": 3,
"seasonal": 7,
"trend": 5,
"seasonal_deg": 2,
"trend_deg": 2,
"robust": True,
"seasonal_jump": 2,
"trend_jump": 2,
"low_pass_jump": 2,
"forecaster_trend": NaiveForecaster(strategy="drift"),
"forecaster_seasonal": NaiveForecaster(sp=3),
"forecaster_resid": NaiveForecaster(strategy="mean"),
},
]
return params_list
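# After `fit`, the STL decomposition is exposed on the estimator (a sketch):
#
#   f = STLForecaster(sp=12).fit(y)
#   f.trend_ + f.seasonal_ + f.resid_  # recovers the training series exactly
#   f.predict(fh=[1, 2, 3])            # sum of the three component forecasts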
---
blob_id: 63b3e2265f65d2ac45a2c3c31e851317d9134618
directory_id: 8f267fe1157904023004aa1fcee8cdcaf1d69f74
path: /tempest/api/volume/admin/test_volume_services_negative.py
content_id: bf39be5ada9eafdf91d7f014460c26549c6b0fd5
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: openstack/tempest
snapshot_id: a65737f3e62d4ebeb7e387feac7bcc636d3f5fe0
revision_id: 3932a799e620a20d7abf7b89e21b520683a1809b
branch_name: refs/heads/master
visit_date: 2023-08-28T15:04:21.241805
revision_date: 2023-08-28T10:16:57
committer_date: 2023-08-28T10:16:57
github_id: 2,356,406
star_events_count: 270
fork_events_count: 407
gha_license_id: Apache-2.0
gha_event_created_at: 2022-06-29T15:52:45
gha_created_at: 2011-09-09T15:56:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,274
extension: py
filename: test_volume_services_negative.py
content:
# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
"""Negative tests of volume services"""
@classmethod
def resource_setup(cls):
super(VolumeServicesNegativeTest, cls).resource_setup()
services = cls.admin_volume_services_client.list_services()['services']
cls.host = services[0]['host']
cls.binary = services[0]['binary']
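    # The listing above is assumed to return a payload shaped roughly like
    #   {"services": [{"binary": "cinder-scheduler", "host": "controller",
    #                  "status": "enabled", ...}, ...]}
    # so the first entry supplies a known-valid host/binary pair for the
    # negative tests below.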
@decorators.attr(type='negative')
@decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
def test_enable_service_with_invalid_host(self):
"""Test enabling volume service with invalid host should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_volume_services_client.enable_service,
host='invalid_host', binary=self.binary)
@decorators.attr(type='negative')
@decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
def test_disable_service_with_invalid_binary(self):
"""Test disabling volume service with invalid binary should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_volume_services_client.disable_service,
host=self.host, binary='invalid_binary')
@decorators.attr(type='negative')
@decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
def test_disable_log_reason_with_no_reason(self):
"""Test disabling volume service with none reason should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.disable_log_reason,
host=self.host, binary=self.binary,
disabled_reason=None)
@decorators.attr(type='negative')
@decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
def test_freeze_host_with_invalid_host(self):
"""Test freezing volume service with invalid host should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.freeze_host,
host='invalid_host')
@decorators.attr(type='negative')
@decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
def test_thaw_host_with_invalid_host(self):
"""Test thawing volume service with invalid host should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.thaw_host,
host='invalid_host')
---
blob_id: 49ab8fd507053a7b7572439d33e32c8de6da018c
directory_id: 2989f47a57cf23935a159180283463bc92d0ac18
path: /nalaf/features/relations/path.py
content_id: 6e4a54d87c56ce441408df6a512e324d28bd096e
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Rostlab/nalaf
snapshot_id: a5d9fa7931242c8b757064da4870a176a6364994
revision_id: f266480174107d5c8fbff0f4431b2bb54565907e
branch_name: refs/heads/develop
visit_date: 2022-12-13T22:08:56.464434
revision_date: 2021-06-02T11:32:07
committer_date: 2021-06-02T11:32:07
github_id: 33,038,465
star_events_count: 112
fork_events_count: 29
gha_license_id: Apache-2.0
gha_event_created_at: 2022-12-08T05:12:45
gha_created_at: 2015-03-28T15:27:20
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 14,691
extension: py
filename: path.py
content:
from nalaf.features.relations import EdgeFeatureGenerator
from nalaf.features.relations import TokenFeatureGenerator
from nltk.stem import PorterStemmer
from nalaf.utils.graph import get_path, build_walks
class PathFeatureGenerator(EdgeFeatureGenerator):
"""
The length of the path from entity 1 to entity 2 and token features for the
two tokens at the terminal of the path
"""
def __init__(
self,
graphs,
token_feature_generator,
prefix_45_len_tokens,
prefix_46_len,
prefix_47_word_in_path,
prefix_48_dep_forward,
prefix_49_dep_reverse,
prefix_50_internal_pos,
prefix_51_internal_masked_txt,
prefix_52_internal_txt,
prefix_53_internal_stem,
prefix_54_internal_dep_forward,
prefix_55_internal_dep_reverse,
prefix_56_token_path,
prefix_57_dep_style_gram,
prefix_58_edge_gram,
prefix_59_ann_edge_gram,
prefix_60_edge_directions,
prefix_61_dep_1,
prefix_62_masked_txt_dep_0,
prefix_63_pos_dep_0,
prefix_64_ann_type_1,
prefix_65_dep_to_1,
prefix_66_masked_txt_dep_to_0,
prefix_67_pos_to,
prefix_68_ann_type_2,
prefix_69_gov_g_text,
prefix_70_gov_g_pos,
prefix_71_gov_anns,
prefix_72_triple,
):
self.graphs = graphs
"""a dictionary of graphs to avoid recomputation of path"""
self.stemmer = PorterStemmer()
"""an instance of PorterStemmer"""
self.token_feature_generator = token_feature_generator
self.prefix_45_len_tokens = prefix_45_len_tokens
self.prefix_46_len = prefix_46_len
self.prefix_47_word_in_path = prefix_47_word_in_path
self.prefix_48_dep_forward = prefix_48_dep_forward
self.prefix_49_dep_reverse = prefix_49_dep_reverse
self.prefix_50_internal_pos = prefix_50_internal_pos
self.prefix_51_internal_masked_txt = prefix_51_internal_masked_txt
self.prefix_52_internal_txt = prefix_52_internal_txt
self.prefix_53_internal_stem = prefix_53_internal_stem
self.prefix_54_internal_dep_forward = prefix_54_internal_dep_forward
self.prefix_55_internal_dep_reverse = prefix_55_internal_dep_reverse
self.prefix_56_token_path = prefix_56_token_path
self.prefix_57_dep_style_gram = prefix_57_dep_style_gram
self.prefix_58_edge_gram = prefix_58_edge_gram
self.prefix_59_ann_edge_gram = prefix_59_ann_edge_gram
self.prefix_60_edge_directions = prefix_60_edge_directions
self.prefix_61_dep_1 = prefix_61_dep_1
self.prefix_62_masked_txt_dep_0 = prefix_62_masked_txt_dep_0
self.prefix_63_pos_dep_0 = prefix_63_pos_dep_0
self.prefix_64_ann_type_1 = prefix_64_ann_type_1
self.prefix_65_dep_to_1 = prefix_65_dep_to_1
self.prefix_66_masked_txt_dep_to_0 = prefix_66_masked_txt_dep_to_0
self.prefix_67_pos_to = prefix_67_pos_to
self.prefix_68_ann_type_2 = prefix_68_ann_type_2
self.prefix_69_gov_g_text = prefix_69_gov_g_text
self.prefix_70_gov_g_pos = prefix_70_gov_g_pos
self.prefix_71_gov_anns = prefix_71_gov_anns
self.prefix_72_triple = prefix_72_triple
def generate(self, dataset, feature_set, is_training_mode):
for edge in dataset.edges():
head1 = edge.entity1.head_token
head2 = edge.entity2.head_token
sentence = edge.same_part.sentences[edge.same_sentence_id]
            path = get_path(head1, head2, edge.same_part, edge.same_sentence_id, self.graphs)
if len(path) == 0:
path = [head1, head2]
self.path_length_features(path, edge, feature_set, is_training_mode)
self.token_feature_generator.token_features(path[0], 'token_term_1_', edge, feature_set, is_training_mode)
self.token_feature_generator.token_features(path[-1], 'token_term_2_', edge, feature_set, is_training_mode)
self.path_dependency_features(path, edge, feature_set, is_training_mode)
base_words = ['interact', 'bind', 'coactivator', 'complex', 'mediate']
words = []
for word in base_words:
words.append(self.stemmer.stem(word))
self.path_constituents(path, edge, words, feature_set, is_training_mode)
self.path_grams(2, path, edge, feature_set, is_training_mode)
self.path_grams(3, path, edge, feature_set, is_training_mode)
self.path_grams(4, path, edge, feature_set, is_training_mode)
self.path_edge_features(path, edge, feature_set, is_training_mode)
def path_length_features(self, path, edge, feature_set, is_training_mode):
feature_name_1 = self.gen_prefix_feat_name('prefix_45_len_tokens', str(len(path)))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_1)
feature_name_2 = self.gen_prefix_feat_name('prefix_46_len')
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_2, value=len(path))
def path_constituents(self, path, edge, words, feature_set, is_training_mode):
for token in path:
if self.stemmer.stem(token.word) in words:
feature_name_1 = self.gen_prefix_feat_name('prefix_47_word_in_path', self.stemmer.stem(token.word))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_1)
def path_dependency_features(self, path, edge, feature_set, is_training_mode):
for i in range(len(path) - 1):
token1 = path[i]
token2 = path[i + 1]
for dep in token1.features['dependency_to']:
if dep[0] == token2:
feature_name = self.gen_prefix_feat_name('prefix_48_dep_forward', dep[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for dep in token2.features['dependency_to']:
if dep[0] == token1:
feature_name = self.gen_prefix_feat_name('prefix_49_dep_reverse', dep[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for i in range(1, len(path) - 1):
token = path[i]
feature_name_1 = self.gen_prefix_feat_name('prefix_50_internal_pos', token.features['pos'])
feature_name_2 = self.gen_prefix_feat_name('prefix_51_internal_masked_txt', token.masked_text(edge.same_part))
feature_name_3 = self.gen_prefix_feat_name('prefix_52_internal_txt', token.word)
feature_name_4 = self.gen_prefix_feat_name('prefix_53_internal_stem', self.stemmer.stem(token.word))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_1)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_2)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_3)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name_4)
for i in range(2, len(path) - 1):
token1 = path[i]
token2 = path[i + 1]
for dep in token1.features['dependency_to']:
if dep[0] == token2:
feature_name = self.gen_prefix_feat_name('prefix_54_internal_dep_forward', dep[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for dep in token2.features['dependency_to']:
if dep[0] == token1:
feature_name = self.gen_prefix_feat_name('prefix_55_internal_dep_reverse', dep[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
def build_walk_paths(self, path, edge, feature_set, is_training_mode):
internal_types = ''
for token in path:
ann_types = self.token_feature_generator.annotated_types(token, edge)
for ann in ann_types:
internal_types += '_'+ann
internal_types += '_'
feature_name = self.gen_prefix_feat_name('prefix_56_token_path', internal_types)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
def path_grams(self, n, path, edge, feature_set, is_training_mode):
token1 = path[0]
token2 = path[-1]
token1_anns = self.token_feature_generator.annotated_types(token1, edge)
token2_anns = self.token_feature_generator.annotated_types(token2, edge)
self.build_walk_paths(path, edge, feature_set, is_training_mode)
all_walks = build_walks(path)
for i in range(len(all_walks)):
dir_grams = ''
for j in range(len(path) - 1):
current_walk = all_walks[i]
if current_walk[j][0].features['dependency_from'][0] == path[i]:
dir_grams += 'F' # Forward
else:
dir_grams += 'R' # Reverse
            if i >= n - 1:
                style_gram = dir_grams[i - n + 1:i + 1]
edge_gram = 'dep_gram_' + style_gram
for k in range(1, n):
token = edge.same_part.sentences[edge.same_sentence_id][(path[i-(n-1)+k]).features['id']-1]
self.token_feature_generator.token_features(token, 'tok_'+style_gram, edge, feature_set, is_training_mode)
for k in range(n):
dep = current_walk[i-(n-1)+k][1]
feature_name = self.gen_prefix_feat_name('prefix_57_dep_style_gram', style_gram, str(k), dep)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
edge_gram += '_' + dep
feature_name = self.gen_prefix_feat_name('prefix_58_edge_gram', edge_gram)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for ann1 in token1_anns:
for ann2 in token2_anns:
feature_name = self.gen_prefix_feat_name('prefix_59_ann_edge_gram', ann1, edge_gram, ann2)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
# Note: relna code had this within the 2nd inner loop. This was different to what's in original LocText
# and likely an unintended bug. The difference was spotted by Madhukar
feature_name = self.gen_prefix_feat_name('prefix_60_edge_directions', dir_grams)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
def path_edge_features(self, path, edge, feature_set, is_training_mode):
head1 = edge.entity1.head_token
head2 = edge.entity2.head_token
dependency_list = []
for i in range(len(path) - 1):
token1 = path[i]
token2 = path[i + 1]
dependency_list.append(token2.features['dependency_from'])
dependency_list.append(token1.features['dependency_from'])
for dependency in dependency_list:
feature_name = self.gen_prefix_feat_name('prefix_61_dep_1', dependency[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_62_masked_txt_dep_0', dependency[0].masked_text(edge.same_part))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_63_pos_dep_0', dependency[0].features['pos'])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
token1 = dependency[0]
ann_types_1 = self.token_feature_generator.annotated_types(token1, edge)
for ann in ann_types_1:
feature_name = self.gen_prefix_feat_name('prefix_64_ann_type_1', ann)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
g_text = dependency[0].masked_text(edge.same_part)
g_pos = dependency[0].features['pos']
g_at = 'no_ann_type'
# TODO Juanmi: I do not understand why the extra inner loop here (not in original LocText)
# I think it's just trying to get the dep-to token. However, that should already be included in dependency[1]
            # to the best of my knowledge. Also `dependency_to` is now broken
for dep in dependency[0].features['dependency_to']:
feature_name = self.gen_prefix_feat_name('prefix_65_dep_to_1', dep[1])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_66_masked_txt_dep_to_0', dep[0].masked_text(edge.same_part))
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_67_pos_to', dep[0].features['pos'])
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
token2 = dep[0]
ann_types_2 = self.token_feature_generator.annotated_types(token2, edge)
for ann in ann_types_2:
feature_name = self.gen_prefix_feat_name('prefix_68_ann_type_2', ann)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
d_text = token2.masked_text(edge.same_part)
d_pos = token2.features['pos']
d_at = 'no_ann_type'
feature_name = self.gen_prefix_feat_name('prefix_69_gov_g_text', g_text, d_text)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
feature_name = self.gen_prefix_feat_name('prefix_70_gov_g_pos', g_pos, d_pos)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for ann1 in ann_types_1:
for ann2 in ann_types_2:
feature_name = self.gen_prefix_feat_name('prefix_71_gov_anns', ann1, ann2)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
for ann1 in ann_types_1:
feature_name = self.gen_prefix_feat_name('prefix_72_triple', ann1, dependency[1], d_at)
self.add_to_feature_set(feature_set, is_training_mode, edge, feature_name)
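# Footnote on the stemming used in `path_constituents` above (a sketch; exact
# outputs depend on the nltk version):
#
#   from nltk.stem import PorterStemmer
#   PorterStemmer().stem("interaction")  # -> "interact", so the base_words
#   # also match inflected forms appearing on the dependency path.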
---
blob_id: efa32c877a879adf9082c55a039adb28b9ea4036
directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda
path: /build/toolchain/win/ml.py
content_id: 8cc2c9e1ea4281eeffed1108c135a546de97613e
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: chromium/chromium
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
branch_name: refs/heads/main
visit_date: 2023-08-24T00:35:12.585945
revision_date: 2023-08-23T22:01:11
committer_date: 2023-08-23T22:01:11
github_id: 120,360,765
star_events_count: 17,408
fork_events_count: 7,102
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-10T23:44:27
gha_created_at: 2018-02-05T20:55:32
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 11,861
extension: py
filename: ml.py
content:
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wraps ml.exe or ml64.exe and postprocesses the output to be deterministic.
Sets timestamp in .obj file to 0, hence incompatible with link.exe /incremental.
Use by prefixing the ml(64).exe invocation with this script:
python ml.py ml.exe [args...]"""
import array
import collections
import struct
import subprocess
import sys
class Struct(object):
"""A thin wrapper around the struct module that returns a namedtuple"""
def __init__(self, name, *args):
"""Pass the name of the return type, and then an interleaved list of
format strings as used by the struct module and of field names."""
self.fmt = '<' + ''.join(args[0::2])
self.type = collections.namedtuple(name, args[1::2])
def pack_into(self, buffer, offset, data):
return struct.pack_into(self.fmt, buffer, offset, *data)
def unpack_from(self, buffer, offset=0):
return self.type(*struct.unpack_from(self.fmt, buffer, offset))
def size(self):
return struct.calcsize(self.fmt)
def Subtract(nt, **kwargs):
"""Subtract(nt, f=2) returns a new namedtuple with 2 subtracted from nt.f"""
return nt._replace(**{k: getattr(nt, k) - v for k, v in kwargs.items()})
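# Example (a sketch): Point = Struct('Point', 'I', 'x', 'I', 'y') unpacks
# 8 little-endian bytes via Point.unpack_from(...) into Point.type(x=..., y=...),
# and Subtract(pt, x=1) returns a copy of pt with pt.x decremented by 1.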
def MakeDeterministic(objdata):
# Takes data produced by ml(64).exe (without any special flags) and
# 1. Sets the timestamp to 0
# 2. Strips the .debug$S section (which contains an unwanted absolute path)
# This makes several assumptions about ml's output:
# - Section data is in the same order as the corresponding section headers:
# section headers preceding the .debug$S section header have their data
# preceding the .debug$S section data; likewise for section headers
# following the .debug$S section.
# - The .debug$S section contains only the absolute path to the obj file and
# nothing else, in particular there's only a single entry in the symbol
# table referring to the .debug$S section.
# - There are no COFF line number entries.
# - There's no IMAGE_SYM_CLASS_CLR_TOKEN symbol.
# These seem to hold in practice; if they stop holding this script needs to
# become smarter.
objdata = array.array('b', objdata) # Writable, e.g. via struct.pack_into.
# Read coff header.
COFFHEADER = Struct('COFFHEADER', 'H', 'Machine', 'H', 'NumberOfSections',
'I', 'TimeDateStamp', 'I', 'PointerToSymbolTable', 'I',
'NumberOfSymbols', 'H', 'SizeOfOptionalHeader', 'H',
'Characteristics')
coff_header = COFFHEADER.unpack_from(objdata)
assert coff_header.SizeOfOptionalHeader == 0 # Only set for binaries.
# Read section headers following coff header.
SECTIONHEADER = Struct('SECTIONHEADER', '8s', 'Name', 'I', 'VirtualSize', 'I',
'VirtualAddress', 'I', 'SizeOfRawData', 'I',
'PointerToRawData', 'I', 'PointerToRelocations', 'I',
'PointerToLineNumbers', 'H', 'NumberOfRelocations',
'H', 'NumberOfLineNumbers', 'I', 'Characteristics')
section_headers = []
debug_section_index = -1
for i in range(0, coff_header.NumberOfSections):
section_header = SECTIONHEADER.unpack_from(objdata,
offset=COFFHEADER.size() +
i * SECTIONHEADER.size())
assert not section_header[0].startswith(b'/') # Support short names only.
section_headers.append(section_header)
if section_header.Name == b'.debug$S':
assert debug_section_index == -1
debug_section_index = i
assert debug_section_index != -1
data_start = COFFHEADER.size() + len(section_headers) * SECTIONHEADER.size()
# Verify the .debug$S section looks like we expect.
assert section_headers[debug_section_index].Name == b'.debug$S'
assert section_headers[debug_section_index].VirtualSize == 0
assert section_headers[debug_section_index].VirtualAddress == 0
debug_size = section_headers[debug_section_index].SizeOfRawData
debug_offset = section_headers[debug_section_index].PointerToRawData
assert section_headers[debug_section_index].PointerToRelocations == 0
assert section_headers[debug_section_index].PointerToLineNumbers == 0
assert section_headers[debug_section_index].NumberOfRelocations == 0
assert section_headers[debug_section_index].NumberOfLineNumbers == 0
# Make sure sections in front of .debug$S have their data preceding it.
for header in section_headers[:debug_section_index]:
assert header.PointerToRawData < debug_offset
assert header.PointerToRelocations < debug_offset
assert header.PointerToLineNumbers < debug_offset
  # Make sure sections after .debug$S have their data following it.
for header in section_headers[debug_section_index + 1:]:
# Make sure the .debug$S data is at the very end of section data:
assert header.PointerToRawData > debug_offset
assert header.PointerToRelocations == 0
assert header.PointerToLineNumbers == 0
# Make sure the first non-empty section's data starts right after the section
# headers.
for section_header in section_headers:
if section_header.PointerToRawData == 0:
assert section_header.PointerToRelocations == 0
assert section_header.PointerToLineNumbers == 0
continue
assert section_header.PointerToRawData == data_start
break
# Make sure the symbol table (and hence, string table) appear after the last
# section:
assert (
coff_header.PointerToSymbolTable >=
section_headers[-1].PointerToRawData + section_headers[-1].SizeOfRawData)
# The symbol table contains a symbol for the no-longer-present .debug$S
# section. If we leave it there, lld-link will complain:
#
# lld-link: error: .debug$S should not refer to non-existent section 5
#
# so we need to remove that symbol table entry as well. This shifts symbol
# entries around and we need to update symbol table indices in:
# - relocations
# - line number records (never present)
# - one aux symbol entry (IMAGE_SYM_CLASS_CLR_TOKEN; not present in ml output)
SYM = Struct(
'SYM',
'8s',
'Name',
'I',
'Value',
'h',
'SectionNumber', # Note: Signed!
'H',
'Type',
'B',
'StorageClass',
'B',
'NumberOfAuxSymbols')
i = 0
debug_sym = -1
while i < coff_header.NumberOfSymbols:
sym_offset = coff_header.PointerToSymbolTable + i * SYM.size()
sym = SYM.unpack_from(objdata, sym_offset)
# 107 is IMAGE_SYM_CLASS_CLR_TOKEN, which has aux entry "CLR Token
# Definition", which contains a symbol index. Check it's never present.
assert sym.StorageClass != 107
# Note: sym.SectionNumber is 1-based, debug_section_index is 0-based.
if sym.SectionNumber - 1 == debug_section_index:
assert debug_sym == -1, 'more than one .debug$S symbol found'
debug_sym = i
# Make sure the .debug$S symbol looks like we expect.
# In particular, it should have exactly one aux symbol.
assert sym.Name == b'.debug$S'
assert sym.Value == 0
assert sym.Type == 0
assert sym.StorageClass == 3
assert sym.NumberOfAuxSymbols == 1
elif sym.SectionNumber > debug_section_index:
sym = Subtract(sym, SectionNumber=1)
SYM.pack_into(objdata, sym_offset, sym)
i += 1 + sym.NumberOfAuxSymbols
assert debug_sym != -1, '.debug$S symbol not found'
# Note: Usually the .debug$S section is the last, but for files saying
# `includelib foo.lib`, like safe_terminate_process.asm in 32-bit builds,
# this isn't true: .drectve is after .debug$S.
# Update symbol table indices in relocations.
# There are a few processor types that have one or two relocation types
# where SymbolTableIndex has a different meaning, but not for x86.
REL = Struct('REL', 'I', 'VirtualAddress', 'I', 'SymbolTableIndex', 'H',
'Type')
for header in section_headers[0:debug_section_index]:
for j in range(0, header.NumberOfRelocations):
rel_offset = header.PointerToRelocations + j * REL.size()
rel = REL.unpack_from(objdata, rel_offset)
assert rel.SymbolTableIndex != debug_sym
if rel.SymbolTableIndex > debug_sym:
rel = Subtract(rel, SymbolTableIndex=2)
REL.pack_into(objdata, rel_offset, rel)
# Update symbol table indices in line numbers -- just check they don't exist.
for header in section_headers:
assert header.NumberOfLineNumbers == 0
# Now that all indices are updated, remove the symbol table entry referring to
# .debug$S and its aux entry.
del objdata[coff_header.PointerToSymbolTable +
debug_sym * SYM.size():coff_header.PointerToSymbolTable +
(debug_sym + 2) * SYM.size()]
# Now we know that it's safe to write out the input data, with just the
# timestamp overwritten to 0, the last section header cut out (and the
# offsets of all other section headers decremented by the size of that
# one section header), and the last section's data cut out. The symbol
# table offset needs to be reduced by one section header and the size of
# the missing section.
# (The COFF spec only requires on-disk sections to be aligned in image files,
# for obj files it's not required. If that wasn't the case, deleting slices
  # of data would not generally be safe.)
# Update section offsets and remove .debug$S section data.
for i in range(0, debug_section_index):
header = section_headers[i]
if header.SizeOfRawData:
header = Subtract(header, PointerToRawData=SECTIONHEADER.size())
if header.NumberOfRelocations:
header = Subtract(header, PointerToRelocations=SECTIONHEADER.size())
if header.NumberOfLineNumbers:
header = Subtract(header, PointerToLineNumbers=SECTIONHEADER.size())
SECTIONHEADER.pack_into(objdata,
COFFHEADER.size() + i * SECTIONHEADER.size(),
header)
for i in range(debug_section_index + 1, len(section_headers)):
header = section_headers[i]
shift = SECTIONHEADER.size() + debug_size
if header.SizeOfRawData:
header = Subtract(header, PointerToRawData=shift)
if header.NumberOfRelocations:
header = Subtract(header, PointerToRelocations=shift)
if header.NumberOfLineNumbers:
header = Subtract(header, PointerToLineNumbers=shift)
SECTIONHEADER.pack_into(objdata,
COFFHEADER.size() + i * SECTIONHEADER.size(),
header)
del objdata[debug_offset:debug_offset + debug_size]
# Finally, remove .debug$S section header and update coff header.
coff_header = coff_header._replace(TimeDateStamp=0)
coff_header = Subtract(coff_header,
NumberOfSections=1,
PointerToSymbolTable=SECTIONHEADER.size() + debug_size,
NumberOfSymbols=2)
COFFHEADER.pack_into(objdata, 0, coff_header)
del objdata[COFFHEADER.size() +
debug_section_index * SECTIONHEADER.size():COFFHEADER.size() +
(debug_section_index + 1) * SECTIONHEADER.size()]
# All done!
if sys.version_info.major == 2:
return objdata.tostring()
else:
return objdata.tobytes()
def main():
ml_result = subprocess.call(sys.argv[1:])
if ml_result != 0:
return ml_result
objfile = None
for i in range(1, len(sys.argv)):
if sys.argv[i].startswith('/Fo'):
objfile = sys.argv[i][len('/Fo'):]
assert objfile, 'failed to find ml output'
with open(objfile, 'rb') as f:
objdata = f.read()
objdata = MakeDeterministic(objdata)
with open(objfile, 'wb') as f:
f.write(objdata)
if __name__ == '__main__':
sys.exit(main())
---
blob_id: 6a5e6ed2b3e0d18eced9299cb08190ef2ad044e2
directory_id: 0933f9ecf49ed89db35cee051a64648886f13e40
path: /fs/opener/zipfs.py
content_id: 10c979ccbed03bcc024433058c4ebbc2af29747d
detected_licenses: ["MIT"]
license_type: permissive
repo_name: PyFilesystem/pyfilesystem2
snapshot_id: 63da155692594d0405dd237db7d66be243658249
revision_id: 8ed9dc495d8ba2f83fbb2a1145d34d92e13644be
branch_name: refs/heads/master
visit_date: 2023-09-01T17:05:54.176292
revision_date: 2022-10-18T10:59:07
committer_date: 2022-10-18T10:59:07
github_id: 70,920,962
star_events_count: 1,956
fork_events_count: 254
gha_license_id: MIT
gha_event_created_at: 2023-08-24T20:00:22
gha_created_at: 2016-10-14T15:05:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 925
extension: py
filename: zipfs.py
content:
# coding: utf-8
"""`ZipFS` opener definition.
"""
from __future__ import absolute_import, print_function, unicode_literals
import typing
from .base import Opener
from .errors import NotWriteable
from .registry import registry
if typing.TYPE_CHECKING:
from typing import Text
from ..zipfs import ZipFS # noqa: F401
from .parse import ParseResult
@registry.install
class ZipOpener(Opener):
"""`ZipFS` opener."""
protocols = ["zip"]
def open_fs(
self,
fs_url, # type: Text
parse_result, # type: ParseResult
writeable, # type: bool
create, # type: bool
cwd, # type: Text
):
# type: (...) -> ZipFS
from ..zipfs import ZipFS
if not create and writeable:
raise NotWriteable("Unable to open existing ZIP file for writing")
zip_fs = ZipFS(parse_result.resource, write=create)
return zip_fs
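# Example of this opener in use (a sketch; the archive name is hypothetical):
#
#   import fs
#   with fs.open_fs("zip://backup.zip", create=True) as zip_fs:
#       zip_fs.writetext("hello.txt", "hello")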
---
blob_id: b0c46d039767ca8d1a39e2909e76b38c086b293a
directory_id: 674caa0d07acfa73a49a8762ca48eccef22e68d8
path: /parsers/id3v2.py
content_id: 74399d88da0aa89f83d4a3dc358f223fe4f722dc
detected_licenses: ["MIT"]
license_type: permissive
repo_name: corkami/mitra
snapshot_id: 2e52d85d7a6fc158155299e8e5e786260fdc1759
revision_id: c2a1939465a36b70a0ddf37c18e67c71352d8c9c
branch_name: refs/heads/master
visit_date: 2023-04-16T23:49:36.719441
revision_date: 2023-04-11T09:37:40
committer_date: 2023-04-11T09:37:40
github_id: 297,723,842
star_events_count: 1,113
fork_events_count: 80
gha_license_id: MIT
gha_event_created_at: 2020-11-02T15:54:03
gha_created_at: 2020-09-22T17:33:10
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,362
extension: py
filename: id3v2.py
content:
#!/usr/bin/env python3
# MP3 (with an ID3v2.3/4 header)
from parsers import FType
import struct
def _7to8(d):
b3, b2, b1, b0 = struct.unpack('>4B', d[:4])
assert b3 < 0x80
assert b2 < 0x80
assert b1 < 0x80
assert b0 < 0x80
return (((b3 * 0x80 + b2) * 0x80 + b1) * 0x80 + b0)
assert _7to8(b"\0\0\0\x7F") == 127
assert _7to8(b"\0\0\1\0") == 128
assert _7to8(b"\0\0\x6a\x7F") == 13695
def _8to7(n):
l = []
for i in range(4):
l += [n % 0x80]
n = n // 0x80
return bytes(l[::-1])
assert _8to7(127) == b"\0\0\0\x7f"
assert _8to7(128) == b"\0\0\1\0"
assert _8to7(13695) == b"\0\0\x6a\x7F"
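# Round-trip sanity check (editor's addition): the two helpers are inverses over
# the 28-bit "synchsafe" range used for ID3v2 sizes (four 7-bit groups, big-endian).
assert all(_7to8(_8to7(n)) == n for n in (0, 1, 0x7F, 0x80, 13695, 0x0FFFFFFF))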
class parser(FType):
DESC = "ID3v2 [Tag]"
TYPE = "ID3v2"
MAGIC = b"ID3\3\0"
def __init__(self, data=""):
FType.__init__(self, data)
self.data = data
self.bParasite = True
self.cut = 0xA
self.parasite_o = 0x14
self.parasite_s = 0xffffff # ?
# the ID3 header prevents the appended data from being interpreted
# but it can conflict with an ID3v1 footer if also present
self.bAppData = True
self.prewrap = 4+4+2
def wrap(self, data, type_=b"JUNK"):
wrapped = b"".join([
type_,
_8to7(len(data)),
b"\0\0",
data,
])
return wrapped
def fixformat(self, d, delta):
SizeOff = 6
SizeLen = 4
d = b"".join([
d[:SizeOff],
_8to7(_7to8(d[SizeOff:SizeOff+SizeLen]) + delta),
d[SizeOff+SizeLen:]])
return d
|
4dc8c09b566dd4e14b3632a65d92896ab71aa180
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/day-of-the-week/Solution.py
|
ada2a7e2cb25f838dc21eaded1b0d221dfad08b1
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518
| 2023-01-02T02:05:35
| 2023-01-02T02:05:35
| 43,345,677
| 155
| 66
|
MIT
| 2020-10-02T03:41:36
| 2015-09-29T04:54:38
|
Python
|
UTF-8
|
Python
| false
| false
| 752
|
py
|
Solution.py
|
"""
Given a date, return the corresponding day of the week for that date.
The input is given as three integers representing the day, month and year respectively.
Return the answer as one of the following values {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}.
Example 1:
Input: day = 31, month = 8, year = 2019
Output: "Saturday"
Example 2:
Input: day = 18, month = 7, year = 1999
Output: "Sunday"
Example 3:
Input: day = 15, month = 8, year = 1993
Output: "Sunday"
Constraints:
The given dates are valid dates between the years 1971 and 2100.
"""
import datetime


class Solution:
def dayOfTheWeek(self, day: int, month: int, year: int) -> str:
return datetime.datetime(year, month, day).strftime('%A')
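# Note (editor's addition): strftime('%A') is locale-dependent, so under a
# non-English locale it may not return the English day names the problem
# expects. A locale-proof variant (sketch) indexes a fixed tuple instead:
#
#   DAYS = ("Monday", "Tuesday", "Wednesday", "Thursday",
#           "Friday", "Saturday", "Sunday")
#   return DAYS[datetime.date(year, month, day).weekday()]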
|
66f6fef98450286231290b0ce89e0ce80851cb32
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/PyTorch/DrugDiscovery/MoFlow/moflow/model/model.py
|
83e39950fccd78648193047b50b5d78c4222cb30
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,235
|
py
|
model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import math
import torch
import torch.nn as nn
from moflow.config import Config
from moflow.model.glow import Glow, GlowOnGraph
def gaussian_nll(x, mean, ln_var):
"""Computes the negative log-likelihood of a Gaussian distribution.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function computes in
elementwise manner the negative log-likelihood of :math:`x` on a
Gaussian distribution :math:`N(\\mu, S)`,
.. math::
-\\log N(x; \\mu, \\sigma^2) =
\\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) +
\\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu),
where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal
matrix where :math:`S_{ii} = \\sigma_i^2`.
Args:
x: Input variable.
mean: Mean of a Gaussian distribution, :math:`\\mu`.
ln_var: Logarithm of variance of a Gaussian distribution,
:math:`\\log(\\sigma^2)`.
Returns:
torch.Tensor:
Negative log-likelihood.
"""
x_prec = torch.exp(-ln_var)
x_diff = x - mean
x_power = (x_diff * x_diff) * x_prec * -0.5
loss = (ln_var + math.log(2 * (math.pi))) / 2 - x_power
return loss
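# Quick numerical check (editor's sketch): with x == mean and ln_var == 0 the
# per-element NLL reduces to 0.5 * log(2 * pi) ~= 0.9189.
assert torch.allclose(
    gaussian_nll(torch.zeros(1), torch.zeros(1), torch.zeros(1)),
    torch.tensor(0.5 * math.log(2 * math.pi)),
)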
class MoFlowLoss(nn.Module):
def __init__(self, config: Config) -> None:
super().__init__()
self.b_n_type = config.num_edge_features
self.a_n_node = config.max_num_nodes
self.a_n_type = config.num_node_features
self.b_size = self.a_n_node * self.a_n_node * self.b_n_type
self.a_size = self.a_n_node * self.a_n_type
if config.model_config.learn_dist:
self.ln_var = nn.Parameter(torch.zeros(1))
else:
self.register_buffer('ln_var', torch.zeros(1))
def forward(self, h, adj_h, sum_log_det_jacs_x, sum_log_det_jacs_adj):
z = [h, adj_h]
logdet = [sum_log_det_jacs_x, sum_log_det_jacs_adj]
device = z[0].device
dtype = z[0].dtype
        z[0] = z[0].reshape(z[0].shape[0], -1)
z[1] = z[1].reshape(z[1].shape[0], -1)
logdet[0] = logdet[0] - self.a_size * math.log(2.)
logdet[1] = logdet[1] - self.b_size * math.log(2.)
ln_var_adj = self.ln_var * torch.ones([self.b_size], device=device, dtype=dtype)
ln_var_x = self.ln_var * torch.ones([self.a_size], device=device, dtype=dtype)
nll_adj = torch.mean(
torch.sum(gaussian_nll(z[1], torch.zeros(self.b_size, device=device, dtype=dtype), ln_var_adj), dim=1)
- logdet[1])
nll_adj = nll_adj / (self.b_size * math.log(2.)) # the negative log likelihood per dim with log base 2
nll_x = torch.mean(torch.sum(
gaussian_nll(z[0], torch.zeros(self.a_size, device=device, dtype=dtype), ln_var_x),
dim=1) - logdet[0])
nll_x = nll_x / (self.a_size * math.log(2.)) # the negative log likelihood per dim with log base 2
return nll_x, nll_adj
class MoFlow(nn.Module):
def __init__(self, config: Config):
super(MoFlow, self).__init__()
self.config = config
self.b_n_type = config.num_edge_features
self.a_n_node = config.max_num_nodes
self.a_n_type = config.num_node_features
self.b_size = self.a_n_node * self.a_n_node * self.b_n_type
self.a_size = self.a_n_node * self.a_n_type
self.noise_scale = config.model_config.noise_scale
self.bond_model = Glow(
in_channel=self.b_n_type,
n_flow=config.model_config.bond_config.n_flow,
n_block=config.model_config.bond_config.n_block,
squeeze_fold=config.model_config.bond_config.n_squeeze,
hidden_channel=config.model_config.bond_config.hidden_ch,
conv_lu=config.model_config.bond_config.conv_lu
)
self.atom_model = GlowOnGraph(
n_node=self.a_n_node,
in_dim=self.a_n_type,
hidden_dim_dict={
'gnn': config.model_config.atom_config.hidden_gnn,
'linear': config.model_config.atom_config.hidden_lin
},
n_flow=config.model_config.atom_config.n_flow,
n_block=config.model_config.atom_config.n_block,
mask_row_size_list=config.model_config.atom_config.mask_row_size_list,
mask_row_stride_list=config.model_config.atom_config.mask_row_stride_list,
)
self._cuda_graphs = dict()
self.atom_stream = None
self.bond_stream = None
@torch.jit.ignore
def forward(self, adj: torch.Tensor, x: torch.Tensor, with_cuda_graph: bool = False):
"""
:param adj: (256,4,9,9)
:param x: (256,9,5)
:return:
"""
if with_cuda_graph and self.atom_stream is None:
self.atom_stream = torch.cuda.Stream()
self.bond_stream = torch.cuda.Stream()
h = x
# add uniform noise to node feature matrices
if self.training:
if self.noise_scale == 0:
h = h/2.0 - 0.5 + torch.rand_like(x) * 0.4
else:
h = h + torch.rand_like(x) * self.noise_scale
if with_cuda_graph:
if self.atom_model not in self._cuda_graphs:
h, sum_log_det_jacs_x = self._forward_graph(self.atom_model, adj, h)
else:
self.atom_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.atom_stream):
h, sum_log_det_jacs_x = self._forward_graph(self.atom_model, adj, h)
else:
h, sum_log_det_jacs_x = self.atom_model(adj, h)
# add uniform noise to adjacency tensors
if self.training:
if self.noise_scale == 0:
adj_bond = adj/2.0 - 0.5 + torch.rand_like(adj) * 0.4
else:
adj_bond = adj + torch.rand_like(adj) * self.noise_scale
else:
adj_bond = adj
if with_cuda_graph:
if self.bond_model not in self._cuda_graphs:
adj_h, sum_log_det_jacs_adj = self._forward_graph(self.bond_model, adj_bond)
else:
self.bond_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.bond_stream):
adj_h, sum_log_det_jacs_adj = self._forward_graph(self.bond_model, adj_bond)
else:
adj_h, sum_log_det_jacs_adj = self.bond_model(adj_bond)
if with_cuda_graph:
torch.cuda.current_stream().wait_stream(self.atom_stream)
torch.cuda.current_stream().wait_stream(self.bond_stream)
return h, adj_h, sum_log_det_jacs_x, sum_log_det_jacs_adj
@torch.jit.export
def reverse(self, z):
"""
Returns a molecule, given its latent vector.
:param z: latent vector. Shape: [B, N*N*M + N*T]
B = Batch size, N = number of atoms, M = number of bond types,
T = number of atom types (Carbon, Oxygen etc.)
:return: adjacency matrix and feature matrix of a molecule
"""
batch_size = z.shape[0]
z_x = z[:, :self.a_size]
z_adj = z[:, self.a_size:]
h_adj = z_adj.reshape(batch_size, self.b_n_type, self.a_n_node, self.a_n_node)
h_adj = h_adj.to(memory_format=torch.channels_last)
h_adj = self.bond_model.reverse(h_adj)
if self.noise_scale == 0:
h_adj = (h_adj + 0.5) * 2
adj = h_adj
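        # symmetrize the decoded adjacency tensor, softmax over the bond-type
        # channel, then keep only the per-edge argmax channel via the floor trick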
adj = adj + adj.permute(0, 1, 3, 2)
adj = adj / 2
adj = adj.softmax(dim=1)
max_bond = adj.max(dim=1).values.reshape(batch_size, -1, self.a_n_node, self.a_n_node)
adj = torch.floor(adj / max_bond)
adj = adj.to(memory_format=torch.channels_last)
h_x = z_x.reshape(batch_size, self.a_n_node, self.a_n_type)
h_x = self.atom_model.reverse((adj, h_x))
if self.noise_scale == 0:
h_x = (h_x + 0.5) * 2
return adj, h_x
@torch.jit.ignore
def _forward_graph(self, model, *args):
if model not in self._cuda_graphs:
if torch.distributed.is_initialized():
torch.distributed.barrier()
torch.cuda.synchronize()
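            # first call: capture the model's forward into a CUDA graph; later
            # calls replay the captured graph instead of re-launching kernels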
self._cuda_graphs[model] = torch.cuda.make_graphed_callables(
model,
args,
)
torch.cuda.synchronize()
if torch.distributed.is_initialized():
torch.distributed.barrier()
return self._cuda_graphs[model](*args)
|
4d939fa18835d34f3096a38b9aa0c08a42d130af
|
219ac33431182adf338c96d048f1c6edc2374978
|
/ursina/shaders/normals_shader.py
|
c153c47b7d12904c19338c960cbc2e078e500474
|
[
"MIT"
] |
permissive
|
pokepetter/ursina
|
bd1b48f0bd58f53bfbf7c3505d7d80ea54744a0b
|
d369088aad31ef162caa8528547e45ca98c06265
|
refs/heads/master
| 2023-08-18T12:57:19.582711
| 2023-08-17T11:08:13
| 2023-08-17T11:08:13
| 97,768,581
| 2,014
| 464
|
MIT
| 2023-09-05T19:40:20
| 2017-07-19T23:04:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
normals_shader.py
|
from ursina import *
normals_shader = Shader(name='normals_shader', language=Shader.GLSL,
vertex = '''
#version 140
uniform mat4 p3d_ModelViewProjectionMatrix;
uniform mat4 p3d_ModelMatrix;
in vec4 p3d_Vertex;
in vec3 p3d_Normal;
out vec3 world_normal;
void main() {
gl_Position = p3d_ModelViewProjectionMatrix * p3d_Vertex;
world_normal = normalize(mat3(p3d_ModelMatrix) * p3d_Normal);
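    // note (editor's): mat3(model) is a valid normal transform only for rigid or
    // uniformly scaled models; non-uniform scaling needs the inverse-transpose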
}
''',
fragment='''
#version 130
uniform vec4 p3d_ColorScale;
in vec2 texcoord;
out vec4 fragColor;
in vec3 world_normal;
void main() {
fragColor = vec4(world_normal*0.5+0.5, 1);
}
''',
geometry='',
)
if __name__ == '__main__':
from ursina import *
from ursina.prefabs.primitives import *
app = Ursina()
window.color=color.black
# e = Entity(model='sphere', shader=normals_shader)
# e.setShaderInput('transform_matrix', e.getNetTransform().getMat())
shader = normals_shader
a = WhiteCube(shader=shader)
b = AzureSphere(rotation_y=180, x=3)
b.shader = shader
# AzureSphere(shader=a.shader, y=2)
GrayPlane(scale=10, y=-2, texture='shore')
Sky(color=color.light_gray)
EditorCamera()
def update():
b.rotation_z += 1
b.rotation_y += 1
b.rotation_x += 1
# a.rotation_x += 1
# EditorCamera()
app.run()
|
095ff8589ffe07e29aafdca380d7c45c0d275346
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/test/espnet2/enh/layers/test_conv_utils.py
|
2341fe316810d27083de3532d532057a110657c5
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
test_conv_utils.py
|
import pytest
import torch
from espnet2.enh.layers.conv_utils import conv2d_output_shape, convtransp2d_output_shape
@pytest.mark.parametrize("input_dim", [(10, 17), (10, 33)])
@pytest.mark.parametrize("kernel_size", [(1, 3), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (1, 2)])
@pytest.mark.parametrize("padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("dilation", [(1, 1), (1, 2)])
def test_conv2d_output_shape(input_dim, kernel_size, stride, padding, dilation):
h, w = conv2d_output_shape(
input_dim,
kernel_size=kernel_size,
stride=stride,
pad=padding,
dilation=dilation,
)
conv = torch.nn.Conv2d(
1, 1, kernel_size, stride=stride, padding=padding, dilation=dilation
)
x = torch.rand(1, 1, *input_dim)
assert conv(x).shape[2:] == (h, w)
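# For reference (standard PyTorch convention), the helper under test should
# satisfy, per spatial dimension:
#   out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride + 1)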
@pytest.mark.parametrize("input_dim", [(10, 17), (10, 33)])
@pytest.mark.parametrize("kernel_size", [(1, 3), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (1, 2)])
@pytest.mark.parametrize("padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("output_padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("dilation", [(1, 1), (1, 2)])
def test_deconv2d_output_shape(
input_dim, kernel_size, stride, padding, output_padding, dilation
):
if (
output_padding[0] >= stride[0]
or output_padding[0] >= dilation[0]
or output_padding[1] >= stride[1]
or output_padding[1] >= dilation[1]
):
# skip invalid cases
return
h, w = convtransp2d_output_shape(
input_dim,
kernel_size=kernel_size,
stride=stride,
pad=padding,
dilation=dilation,
out_pad=output_padding,
)
deconv = torch.nn.ConvTranspose2d(
1,
1,
kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
dilation=dilation,
)
x = torch.rand(1, 1, *input_dim)
assert deconv(x).shape[2:] == (h, w)
|
36887322f8c2da71ebfe0a287c86b02c2ab32344
|
ef1def58b933921ccf31bece9fc6eb5f7ffb9a18
|
/tests/unit/models/test_restrictionschedule_model.py
|
c44bf604a9ef7d4192dde8d43c39ff40cbda2e4e
|
[
"Apache-2.0"
] |
permissive
|
roscisz/TensorHive
|
4b33acd727e0b294a4a12af972c471e1254136aa
|
5b50245d285618044a9a71c06ea5361a48ad4acb
|
refs/heads/master
| 2023-03-10T05:09:08.874394
| 2022-02-02T11:08:21
| 2022-02-02T11:08:21
| 98,513,283
| 153
| 26
|
Apache-2.0
| 2023-03-01T02:26:54
| 2017-07-27T08:37:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
test_restrictionschedule_model.py
|
import pytest
import datetime
from tensorhive.models.RestrictionSchedule import RestrictionSchedule
from tensorhive.utils.Weekday import Weekday
def test_schedule_creation(tables):
schedule_expression = '12345'
starts_at = datetime.time(8, 0, 0)
ends_at = datetime.time(15, 0, 0)
schedule = RestrictionSchedule(schedule_days=schedule_expression, hour_start=starts_at, hour_end=ends_at)
schedule.save()
def test_cannot_create_schedule_with_wrong_schedule_expression(tables):
starts_at = datetime.time(8, 0, 0)
ends_at = datetime.time(15, 0, 0)
wrong_schedule_expression = '1458'
schedule = RestrictionSchedule(schedule_days=wrong_schedule_expression, hour_start=starts_at, hour_end=ends_at)
with pytest.raises(AssertionError):
schedule.save()
schedule.schedule_days = '1123'
with pytest.raises(AssertionError):
schedule.save()
def test_add_schedule_to_restriction(tables, restriction, active_schedule):
restriction.add_schedule(active_schedule)
assert active_schedule in restriction.schedules
assert restriction in active_schedule.restrictions
def test_schedule_is_active_method_returns_valid_status(tables, restriction):
    # schedule that runs only on the current day of the week
today_schedule_expression = str(datetime.datetime.utcnow().weekday() + 1)
hour_start = datetime.time(0, 0, 0)
hour_end = datetime.time(23, 59, 59)
active_schedule = RestrictionSchedule(schedule_days=today_schedule_expression, hour_start=hour_start,
hour_end=hour_end)
active_schedule.save()
# schedule that runs on every day of the week except for today
not_today_schedule_expression = '1234567'.replace(today_schedule_expression, '')
inactive_schedule = RestrictionSchedule(schedule_days=not_today_schedule_expression, hour_start=hour_start,
hour_end=hour_end)
inactive_schedule.save()
assert active_schedule.is_active is True
assert inactive_schedule.is_active is False
def test_schedule_with_schedule_days_as_list_of_enums_gets_saved_successfully(tables):
schedule_expression = [Weekday.Monday, Weekday.Tuesday]
starts_at = datetime.time(8, 0, 0)
ends_at = datetime.time(15, 0, 0)
schedule = RestrictionSchedule(schedule_days=schedule_expression, hour_start=starts_at, hour_end=ends_at)
schedule.save()
|
4c33702ed5059d8ca7e7d05c78d29b08d4de688e
|
82f96a47af8d7dfea35f18ff57a0527812b016a1
|
/vendor/github.com/tilt-dev/starlark-lsp/pkg/analysis/builtins.py
|
590f7108df2879cea1e0af6cd6a41b4aaf4d75ce
|
[
"Apache-2.0"
] |
permissive
|
tilt-dev/tilt
|
df987756490e16f4ba5c2785bfcf58a5f33b2a1f
|
70a53642fd1812fa6d09568a9a86ce320f65787a
|
refs/heads/master
| 2023-08-08T00:25:49.726804
| 2023-08-01T16:43:21
| 2023-08-01T16:43:21
| 143,896,900
| 3,468
| 175
|
Apache-2.0
| 2023-09-05T21:50:09
| 2018-08-07T16:00:26
|
Go
|
UTF-8
|
Python
| false
| false
| 14,835
|
py
|
builtins.py
|
# This file was generated by `make builtins` based on the spec at:
# https://raw.githubusercontent.com/google/starlark-go/master/doc/spec.md
def abs(x):
"""`abs(x)` returns the absolute value of its argument `x`, which must be an int or float. The result has the same type as `x`."""
pass
def any(x) -> bool:
"""`any(x)` returns `True` if any element of the iterable sequence x has a truth value of true. If the iterable is empty, it returns `False`."""
pass
def all(x) -> bool:
"""`all(x)` returns `False` if any element of the iterable sequence x has a truth value of false. If the iterable is empty, it returns `True`."""
pass
def bool(x) -> bool:
"""`bool(x)` interprets `x` as a Boolean value---`True` or `False`. With no argument, `bool()` returns `False`."""
pass
def chr(i):
"""`chr(i)` returns a string that encodes the single Unicode code point whose value is specified by the integer `i`. `chr` fails unless 0 ≤ `i` ≤ 0x10FFFF."""
pass
def dict() -> Dict:
"""`dict` creates a dictionary. It accepts up to one positional argument, which is interpreted as an iterable of two-element sequences (pairs), each specifying a key/value pair in the resulting dictionary."""
pass
def dir(x) -> List[String]:
"""`dir(x)` returns a new sorted list of the names of the attributes (fields and methods) of its operand. The attributes of a value `x` are the names `f` such that `x.f` is a valid expression."""
pass
def enumerate(x) -> List[Tuple[int, any]]:
"""`enumerate(x)` returns a list of (index, value) pairs, each containing successive values of the iterable sequence xand the index of the value within the sequence."""
pass
def fail(*args, sep=" "):
"""The `fail(*args, sep=" ")` function causes execution to fail with the specified error message. Like `print`, arguments are formatted as if by `str(x)` and separated by a space, unless an alternative separator is specified by a `sep` named argument."""
pass
def float(x) -> float:
"""`float(x)` interprets its argument as a floating-point number."""
pass
def getattr(x, name):
"""`getattr(x, name)` returns the value of the attribute (field or method) of x named `name`. It is a dynamic error if x has no such attribute."""
pass
def hasattr(x, name) -> bool:
"""`hasattr(x, name)` reports whether x has an attribute (field or method) named `name`."""
pass
def hash(x) -> int:
"""`hash(x)` returns an integer hash of a string x such that two equal strings have the same hash. In other words `x == y` implies `hash(x) == hash(y)`."""
pass
def int(x) -> int:
"""`int(x[, base])` interprets its argument as an integer."""
pass
def len(x) -> int:
"""`len(x)` returns the number of elements in its argument."""
pass
def list() -> List:
"""`list` constructs a list."""
pass
def max(x):
"""`max(x)` returns the greatest element in the iterable sequence x."""
pass
def min(x):
"""`min(x)` returns the least element in the iterable sequence x."""
pass
def ord(s):
"""`ord(s)` returns the integer value of the sole Unicode code point encoded by the string `s`."""
pass
def print(*args, sep=" "):
"""`print(*args, sep=" ")` prints its arguments, followed by a newline. Arguments are formatted as if by `str(x)` and separated with a space, unless an alternative separator is specified by a `sep` named argument."""
pass
def range() -> List[int]:
"""`range` returns an immutable sequence of integers defined by the specified interval and stride."""
pass
def repr(x) -> String:
"""`repr(x)` formats its argument as a string."""
pass
def reversed(x) -> List:
"""`reversed(x)` returns a new list containing the elements of the iterable sequence x in reverse order."""
pass
def set(x):
"""`set(x)` returns a new set containing the elements of the iterable x. With no argument, `set()` returns a new empty set."""
pass
def sorted(x) -> List:
"""`sorted(x)` returns a new list containing the elements of the iterable sequence x, in sorted order. The sort algorithm is stable."""
pass
def str(x) -> String:
"""`str(x)` formats its argument as a string."""
pass
def tuple(x):
"""`tuple(x)` returns a tuple containing the elements of the iterable x."""
pass
def type(x) -> String:
"""type(x) returns a string describing the type of its operand."""
pass
def zip() -> List:
"""`zip()` returns a new list of n-tuples formed from corresponding elements of each of the n iterable sequences provided as arguments to `zip`. That is, the first tuple contains the first element of each of the sequences, the second element contains the second element of each of the sequences, and so on. The result list is only as long as the shortest of the input sequences."""
pass
class Dict:
def clear(self):
"""`D.clear()` removes all the entries of dictionary D and returns `None`. It fails if the dictionary is frozen or if there are active iterators."""
pass
def get(self, key):
"""`D.get(key[, default])` returns the dictionary value corresponding to the given key. If the dictionary contains no such value, `get` returns `None`, or the value of the optional `default` parameter if present."""
pass
def items(self) -> List:
"""`D.items()` returns a new list of key/value pairs, one per element in dictionary D, in the same order as they would be returned by a `for` loop."""
pass
def keys(self) -> List:
"""`D.keys()` returns a new list containing the keys of dictionary D, in the same order as they would be returned by a `for` loop."""
pass
def pop(self, key):
"""`D.pop(key[, default])` returns the value corresponding to the specified key, and removes it from the dictionary. If the dictionary contains no such value, and the optional `default` parameter is present, `pop` returns that value; otherwise, it fails."""
pass
def popitem(self):
"""`D.popitem()` returns the first key/value pair, removing it from the dictionary."""
pass
def setdefault(self, key):
"""`D.setdefault(key[, default])` returns the dictionary value corresponding to the given key. If the dictionary contains no such value, `setdefault`, like `get`, returns `None` or the value of the optional `default` parameter if present; `setdefault` additionally inserts the new key/value entry into the dictionary."""
pass
def update(self) -> None:
"""`D.update([pairs][, name=value[, ...])` makes a sequence of key/value insertions into dictionary D, then returns `None.`"""
pass
def values(self) -> List:
"""`D.values()` returns a new list containing the dictionary's values, in the same order as they would be returned by a `for` loop over the dictionary."""
pass
class List:
def append(self, x) -> None:
"""`L.append(x)` appends `x` to the list L, and returns `None`."""
pass
def clear(self) -> None:
"""`L.clear()` removes all the elements of the list L and returns `None`. It fails if the list is frozen or if there are active iterators."""
pass
def extend(self, x) -> None:
"""`L.extend(x)` appends the elements of `x`, which must be iterable, to the list L, and returns `None`."""
pass
def index(self, x) -> int:
"""`L.index(x[, start[, end]])` finds `x` within the list L and returns its index."""
pass
def insert(self, i, x) -> None:
"""`L.insert(i, x)` inserts the value `x` in the list L at index `i`, moving higher-numbered elements along by one. It returns `None`."""
pass
def pop(self):
"""`L.pop([index])` removes and returns the last element of the list L, or, if the optional index is provided, at that index."""
pass
def remove(self, x) -> None:
"""`L.remove(x)` removes the first occurrence of the value `x` from the list L, and returns `None`."""
pass
class Set:
def union(self, iterable):
"""`S.union(iterable)` returns a new set into which have been inserted all the elements of set S and all the elements of the argument, which must be iterable."""
pass
class String:
def elem_ords(self):
"""`S.elem_ords()` returns an iterable value containing the sequence of numeric bytes values in the string S."""
pass
def capitalize(self) -> String:
"""`S.capitalize()` returns a copy of string S with its first code point changed to its title case and all subsequent letters changed to their lower case."""
pass
def codepoint_ords(self):
"""`S.codepoint_ords()` returns an iterable value containing the sequence of integer Unicode code points encoded by the string S. Each invalid code within the string is treated as if it encodes the Unicode replacement character, U+FFFD."""
pass
def count(self, sub) -> int:
"""`S.count(sub[, start[, end]])` returns the number of occcurences of `sub` within the string S, or, if the optional substring indices `start` and `end` are provided, within the designated substring of S. They are interpreted according to Starlark's [indexing conventions](#indexing)."""
pass
def endswith(self, suffix) -> bool:
"""`S.endswith(suffix[, start[, end]])` reports whether the string `S[start:end]` has the specified suffix."""
pass
def find(self, sub) -> int:
"""`S.find(sub[, start[, end]])` returns the index of the first occurrence of the substring `sub` within S."""
pass
def format(self, *args, **kwargs) -> String:
"""`S.format(*args, **kwargs)` returns a version of the format string S in which bracketed portions `{...}` are replaced by arguments from `args` and `kwargs`."""
pass
def index(self, sub) -> int:
"""`S.index(sub[, start[, end]])` returns the index of the first occurrence of the substring `sub` within S, like `S.find`, except that if the substring is not found, the operation fails."""
pass
def isalnum(self) -> bool:
"""`S.isalnum()` reports whether the string S is non-empty and consists only Unicode letters and digits."""
pass
def isalpha(self) -> bool:
"""`S.isalpha()` reports whether the string S is non-empty and consists only of Unicode letters."""
pass
def isdigit(self) -> bool:
"""`S.isdigit()` reports whether the string S is non-empty and consists only of Unicode digits."""
pass
def islower(self) -> bool:
"""`S.islower()` reports whether the string S contains at least one cased Unicode letter, and all such letters are lowercase."""
pass
def isspace(self) -> bool:
"""`S.isspace()` reports whether the string S is non-empty and consists only of Unicode spaces."""
pass
def istitle(self) -> bool:
"""`S.istitle()` reports whether the string S contains at least one cased Unicode letter, and all such letters that begin a word are in title case."""
pass
def isupper(self) -> bool:
"""`S.isupper()` reports whether the string S contains at least one cased Unicode letter, and all such letters are uppercase."""
pass
def join(self, iterable) -> String:
"""`S.join(iterable)` returns the string formed by concatenating each element of its argument, with a copy of the string S between successive elements. The argument must be an iterable whose elements are strings."""
pass
def lower(self) -> String:
"""`S.lower()` returns a copy of the string S with letters converted to lowercase."""
pass
def lstrip(self) -> String:
"""`S.lstrip()` returns a copy of the string S with leading whitespace removed."""
pass
def partition(self, x):
"""`S.partition(x)` splits string S into three parts and returns them as a tuple: the portion before the first occurrence of string `x`, `x` itself, and the portion following it. If S does not contain `x`, `partition` returns `(S, "", "")`."""
pass
def removeprefix(self, x) -> String:
"""`S.removeprefix(prefix)` returns a copy of string S with the prefix `prefix` removed if S starts with `prefix`, otherwise it returns S."""
pass
def removesuffix(self, x) -> String:
"""`S.removesuffix(suffix)` returns a copy of string S with the suffix `suffix` removed if S ends with `suffix`, otherwise it returns S."""
pass
def replace(self, old, new) -> String:
"""`S.replace(old, new[, count])` returns a copy of string S with all occurrences of substring `old` replaced by `new`. If the optional argument `count`, which must be an `int`, is non-negative, it specifies a maximum number of occurrences to replace."""
pass
def rfind(self, sub) -> int:
"""`S.rfind(sub[, start[, end]])` returns the index of the substring `sub` within S, like `S.find`, except that `rfind` returns the index of the substring's _last_ occurrence."""
pass
def rindex(self, sub) -> int:
"""`S.rindex(sub[, start[, end]])` returns the index of the substring `sub` within S, like `S.index`, except that `rindex` returns the index of the substring's _last_ occurrence."""
pass
def rpartition(self, x):
"""`S.rpartition(x)` is like `partition`, but splits `S` at the last occurrence of `x`."""
pass
def rsplit(self) -> List[String]:
"""`S.rsplit([sep[, maxsplit]])` splits a string into substrings like `S.split`, except that when a maximum number of splits is specified, `rsplit` chooses the rightmost splits."""
pass
def rstrip(self) -> String:
"""`S.rstrip()` returns a copy of the string S with trailing whitespace removed."""
pass
def split(self) -> List[String]:
"""`S.split([sep [, maxsplit]])` returns the list of substrings of S, splitting at occurrences of the delimiter string `sep`."""
pass
def elems(self):
"""`S.elems()` returns an iterable value containing successive 1-byte substrings of S. To materialize the entire sequence, apply `list(...)` to the result."""
pass
def codepoints(self):
"""`S.codepoints()` returns an iterable value containing the sequence of substrings of S that each encode a single Unicode code point. Each invalid code within the string is treated as if it encodes the Unicode replacement character, U+FFFD."""
pass
def splitlines(self) -> List[String]:
"""`S.splitlines([keepends])` returns a list whose elements are the successive lines of S, that is, the strings formed by splitting S at line terminators (currently assumed to be a single newline, `\n`, regardless of platform)."""
pass
def startswith(self, prefix) -> bool:
"""`S.startswith(prefix[, start[, end]])` reports whether the string `S[start:end]` has the specified prefix."""
pass
def strip(self) -> String:
"""`S.strip()` returns a copy of the string S with leading and trailing whitespace removed."""
pass
def title(self) -> String:
"""`S.title()` returns a copy of the string S with letters converted to title case."""
pass
def upper(self) -> String:
"""`S.upper()` returns a copy of the string S with letters converted to uppercase."""
pass
|
67d905ebef985e301c195c1e996c1daf03afae61
|
77fd60c4b7e7885b2ec4ca5203edf9489f6f37dc
|
/nipy/algorithms/diagnostics/timediff.py
|
59903b6b522ef6685c4e909e1f644153d0123225
|
[
"BSD-3-Clause"
] |
permissive
|
nipy/nipy
|
156f379adbc07b259e25012662510b1f64aac4c5
|
7eede02471567487e454016c1e7cf637d3afac9e
|
refs/heads/master
| 2023-04-06T14:56:36.303421
| 2023-04-05T19:40:24
| 2023-04-05T19:40:24
| 642,344
| 275
| 115
|
BSD-3-Clause
| 2023-04-05T19:40:25
| 2010-05-02T10:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,360
|
py
|
timediff.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Time series diagnostics
These started life as ``tsdiffana.m`` - see
http://imaging.mrc-cbu.cam.ac.uk/imaging/DataDiagnostics
Oliver Josephs (FIL) gave me (MB) the idea of time-point to time-point
subtraction as a diagnostic for motion and other sudden image changes.
'''
from __future__ import absolute_import
import numpy as np
from ...io.api import as_image
from ...core.reference.coordinate_map import (io_axis_indices, drop_io_dim, AxisError)
def time_slice_diffs(arr, time_axis=-1, slice_axis=None):
''' Time-point to time-point differences over volumes and slices
We think of the passed array as an image. The image has a "time"
dimension given by `time_axis` and a "slice" dimension, given by
`slice_axis`, and one or more other dimensions. In the case of imaging
there will usually be two more dimensions (the dimensions defining the size
of an image slice). A single slice in the time dimension we call a "volume".
A single entry in `arr` is a "voxel". For example, if `time_axis` == 0,
then ``v = arr[0]`` would be the first volume in the series. The volume
``v`` above has ``v.size`` voxels. If, in addition, `slice_axis` == 1, then
for the volume ``v`` (above) ``s = v[0]`` would be a "slice", with
``s.size`` voxels. These are obviously terms from neuroimaging.
Parameters
----------
arr : array_like
Array over which to calculate time and slice differences. We'll
call this array an 'image' in this doc.
time_axis : int, optional
axis of `arr` that varies over time. Default is last
slice_axis : None or int, optional
axis of `arr` that varies over image slice. None gives last non-time
axis.
Returns
-------
results : dict
``T`` is the number of time points (``arr.shape[time_axis]``)
``S`` is the number of slices (``arr.shape[slice_axis]``)
``v`` is the shape of a volume (``rollimg(arr, time_axis)[0].shape``)
``d2[t]`` is the volume of squared differences between voxels at
time point ``t`` and time point ``t+1``
`results` has keys:
* 'volume_mean_diff2' : (T-1,) array
array containing the mean (over voxels in volume) of the
squared difference from one time point to the next
* 'slice_mean_diff2' : (T-1, S) array
giving the mean (over voxels in slice) of the difference from
one time point to the next, one value per slice, per
timepoint
* 'volume_means' : (T,) array
mean over voxels for each volume ``vol[t] for t in 0:T``
* 'slice_diff2_max_vol' : v[:] array
volume, of same shape as input time point volumes, where each slice
       is the slice from ``d2[t]`` for t in 0:T-1, that has the largest
variance across ``t``. Thus each slice in the volume may well result
from a different difference time point.
    * 'diff2_mean_vol' : v[:] array
volume with the mean of ``d2[t]`` across t for t in 0:T-1.
Raises
------
ValueError : if `time_axis` refers to same axis as `slice_axis`
'''
arr = np.asarray(arr)
ndim = arr.ndim
# roll time axis to 0, slice axis to 1 for convenience
if time_axis < 0:
time_axis += ndim
if slice_axis is None:
slice_axis = ndim-2 if time_axis == ndim-1 else ndim-1
elif slice_axis < 0:
slice_axis += ndim
if time_axis == slice_axis:
raise ValueError('Time axis refers to same axis as slice axis')
arr = np.rollaxis(arr, time_axis)
# we may have changed the position of slice_axis
if time_axis > slice_axis:
slice_axis += 1
arr = np.rollaxis(arr, slice_axis, 1)
# shapes of things
shape = arr.shape
T = shape[0]
S = shape[1]
vol_shape = shape[1:]
# loop over time points to save memory
volds = np.empty((T-1,))
sliceds = np.empty((T-1,S))
means = np.empty((T,))
diff_mean_vol = np.zeros(vol_shape)
slice_diff_max_vol = np.zeros(vol_shape)
slice_diff_maxes = np.zeros(S)
last_tp = arr[0]
means[0] = last_tp.mean()
for dtpi in range(0,T-1):
tp = arr[dtpi+1] # shape vol_shape
means[dtpi+1] = tp.mean()
dtp_diff2 = (tp - last_tp)**2
diff_mean_vol += dtp_diff2
sliceds[dtpi] = dtp_diff2.reshape(S, -1).mean(-1)
# check whether we have found a highest-diff slice
sdmx_higher = sliceds[dtpi] > slice_diff_maxes
if any(sdmx_higher):
slice_diff_maxes[sdmx_higher] = sliceds[dtpi][sdmx_higher]
slice_diff_max_vol[sdmx_higher] = dtp_diff2[sdmx_higher]
last_tp = tp
volds = sliceds.mean(1)
diff_mean_vol /= (T-1)
# roll vol shapes back to match input
diff_mean_vol = np.rollaxis(diff_mean_vol, 0, slice_axis)
slice_diff_max_vol = np.rollaxis(slice_diff_max_vol, 0, slice_axis)
return {'volume_mean_diff2': volds,
'slice_mean_diff2': sliceds,
'volume_means': means,
'diff2_mean_vol': diff_mean_vol,
'slice_diff2_max_vol': slice_diff_max_vol}
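# Minimal usage sketch (editor's note, synthetic data):
#   arr = np.random.randn(64, 64, 30, 100)   # x, y, slice, time
#   res = time_slice_diffs(arr)              # time_axis=-1, slice_axis=-2
#   res['volume_mean_diff2'].shape           # -> (99,), one value per time diff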
def time_slice_diffs_image(img, time_axis='t', slice_axis='slice'):
""" Time-point to time-point differences over volumes and slices of image
Parameters
----------
img : Image
The image on which to perform time-point differences
time_axis : str or int, optional
Axis indexing time-points. Default is 't'. If `time_axis` is an integer,
gives the index of the input (domain) axis of `img`. If `time_axis` is a str,
can be an input (domain) name, or an output (range) name, that maps to
an input (domain) name.
slice_axis : str or int, optional
Axis indexing MRI slices. If `slice_axis` is an integer, gives the
index of the input (domain) axis of `img`. If `slice_axis` is a str,
can be an input (domain) name, or an output (range) name, that maps to
an input (domain) name.
Returns
-------
results : dict
`arr` refers to the array as loaded from `img`
``T`` is the number of time points (``img.shape[time_axis]``)
``S`` is the number of slices (``img.shape[slice_axis]``)
``v`` is the shape of a volume (``rollimg(img, time_axis)[0].shape``)
``d2[t]`` is the volume of squared differences between voxels at
time point ``t`` and time point ``t+1``
`results` has keys:
* 'volume_mean_diff2' : (T-1,) array
array containing the mean (over voxels in volume) of the
squared difference from one time point to the next
* 'slice_mean_diff2' : (T-1, S) array
giving the mean (over voxels in slice) of the difference from
one time point to the next, one value per slice, per
timepoint
* 'volume_means' : (T,) array
mean over voxels for each volume ``vol[t] for t in 0:T``
* 'slice_diff2_max_vol' : v[:] image
image volume, of same shape as input time point volumes, where each
slice is is the slice from ``d2[t]`` for t in 0:T-1, that has the
largest variance across ``t``. Thus each slice in the volume may
well result from a different difference time point.
    * 'diff2_mean_vol' : v[:] image
image volume with the mean of ``d2[t]`` across t for t in 0:T-1.
"""
img = as_image(img)
img_class = img.__class__
time_in_ax, time_out_ax = io_axis_indices(img.coordmap, time_axis)
if None in (time_in_ax, time_out_ax):
raise AxisError('Cannot identify matching input output axes with "%s"'
% time_axis)
slice_in_ax, slice_out_ax = io_axis_indices(img.coordmap, slice_axis)
if None in (slice_in_ax, slice_out_ax):
raise AxisError('Cannot identify matching input output axes with "%s"'
% slice_axis)
vol_coordmap = drop_io_dim(img.coordmap, time_axis)
results = time_slice_diffs(img.get_data(), time_in_ax, slice_in_ax)
for key in ('slice_diff2_max_vol', 'diff2_mean_vol'):
vol = img_class(results[key], vol_coordmap)
results[key] = vol
return results
|
8751391d9b1432c298b601468a8b5969fa5812cb
|
4e2117a4381f65e7f2bb2b06da800f40dc98fa12
|
/022_Learning_to_See_Moving_Objects_in_the_Dark/01_float32/config.py
|
b02c2847a30fc22cebc6585dc48271bb7219a678
|
[
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
PINTO0309/PINTO_model_zoo
|
84f995247afbeda2543b5424d5e0a14a70b8d1f1
|
ff08e6e8ab095d98e96fc4a136ad5cbccc75fcf9
|
refs/heads/main
| 2023-09-04T05:27:31.040946
| 2023-08-31T23:24:30
| 2023-08-31T23:24:30
| 227,367,327
| 2,849
| 520
|
MIT
| 2023-08-31T23:24:31
| 2019-12-11T13:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
config.py
|
#!/usr/bin/env python
# ----------------------------------------------------------------
# Configurations for Training and Testing Process
# Written by Haiyang Jiang
# Mar 1st 2019
# ----------------------------------------------------------------
# file lists ================================================================
FILE_LIST = 'file_list'
VALID_LIST = 'valid_list'
TEST_LIST = 'test_list'
CUSTOMIZED_LIST = 'customized_list'
# network.py ================================================================
DEBUG = False
# train.py ================================================================
EXP_NAME = '16_bit_HE_to_HE_gt'
CHECKPOINT_DIR = './1_checkpoint/' + EXP_NAME + '/'
RESULT_DIR = './2_result/' + EXP_NAME + '/'
LOGS_DIR = RESULT_DIR
TRAIN_LOG_DIR = 'train'
VAL_LOG_DIR = 'val'
# training settings
ALL_FRAME = 200
SAVE_FRAMES = list(range(0, ALL_FRAME, 32))
CROP_FRAME = 16
CROP_HEIGHT = 256
CROP_WIDTH = 256
SAVE_FREQ = 5
MAX_EPOCH = 50
FRAME_FREQ = 1
GROUP_NUM = 4
INIT_LR = 1e-4
DECAY_LR = 1e-5
DECAY_EPOCH = 30
# test.py ================================================================
# TEST_CROP_FRAME = 32
# TEST_CROP_HEIGHT = 512
# TEST_CROP_WIDTH = 512
TEST_CROP_FRAME = 16
TEST_CROP_HEIGHT = 256
TEST_CROP_WIDTH = 256
MAX_FRAME = 800
OVERLAP = 0.01
OUT_MAX = 255.0
|
5a72fdbc5a5eff739bada859b347b0b111e43cc2
|
b74320ad439e37dfa48cd8db38dab3b7a20a36ff
|
/scripts/convert_stable_diffusion_controlnet_to_onnx.py
|
4af39b28783681c9e6626cd2c003f2b8635224a5
|
[
"Apache-2.0"
] |
permissive
|
huggingface/diffusers
|
c82beba1ec5f0aba01b6744040a5accc41ec2493
|
5eeedd9e3336882d598091e191559f67433b6427
|
refs/heads/main
| 2023-08-29T01:22:52.237910
| 2023-08-28T18:16:27
| 2023-08-28T18:16:27
| 498,011,141
| 17,308
| 3,158
|
Apache-2.0
| 2023-09-14T20:57:44
| 2022-05-30T16:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 18,463
|
py
|
convert_stable_diffusion_controlnet_to_onnx.py
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import onnx_graphsurgeon as gs
import torch
from onnx import shape_inference
from packaging import version
from polygraphy.backend.onnx.loader import fold_constants
from torch.onnx import export
from diffusers import (
ControlNetModel,
StableDiffusionControlNetImg2ImgPipeline,
)
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
is_torch_2_0_1 = version.parse(version.parse(torch.__version__).base_version) == version.parse("2.0.1")
class Optimizer:
def __init__(self, onnx_graph, verbose=False):
self.graph = gs.import_onnx(onnx_graph)
self.verbose = verbose
def info(self, prefix):
if self.verbose:
print(
f"{prefix} .. {len(self.graph.nodes)} nodes, {len(self.graph.tensors().keys())} tensors, {len(self.graph.inputs)} inputs, {len(self.graph.outputs)} outputs"
)
def cleanup(self, return_onnx=False):
self.graph.cleanup().toposort()
if return_onnx:
return gs.export_onnx(self.graph)
def select_outputs(self, keep, names=None):
self.graph.outputs = [self.graph.outputs[o] for o in keep]
if names:
for i, name in enumerate(names):
self.graph.outputs[i].name = name
def fold_constants(self, return_onnx=False):
onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
def infer_shapes(self, return_onnx=False):
onnx_graph = gs.export_onnx(self.graph)
if onnx_graph.ByteSize() > 2147483648:
raise TypeError("ERROR: model size exceeds supported 2GB limit")
else:
onnx_graph = shape_inference.infer_shapes(onnx_graph)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
def optimize(onnx_graph, name, verbose):
opt = Optimizer(onnx_graph, verbose=verbose)
opt.info(name + ": original")
opt.cleanup()
opt.info(name + ": cleanup")
opt.fold_constants()
opt.info(name + ": fold constants")
# opt.infer_shapes()
# opt.info(name + ': shape inference')
onnx_opt_graph = opt.cleanup(return_onnx=True)
opt.info(name + ": finished")
return onnx_opt_graph
class UNet2DConditionControlNetModel(torch.nn.Module):
def __init__(
self,
unet,
controlnets: ControlNetModel,
):
super().__init__()
self.unet = unet
self.controlnets = controlnets
def forward(
self,
sample,
timestep,
encoder_hidden_states,
controlnet_conds,
controlnet_scales,
):
for i, (controlnet_cond, conditioning_scale, controlnet) in enumerate(
zip(controlnet_conds, controlnet_scales, self.controlnets)
):
down_samples, mid_sample = controlnet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=controlnet_cond,
conditioning_scale=conditioning_scale,
return_dict=False,
)
# merge samples
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
noise_pred = self.unet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
return noise_pred
class UNet2DConditionXLControlNetModel(torch.nn.Module):
def __init__(
self,
unet,
controlnets: ControlNetModel,
):
super().__init__()
self.unet = unet
self.controlnets = controlnets
def forward(
self,
sample,
timestep,
encoder_hidden_states,
controlnet_conds,
controlnet_scales,
text_embeds,
time_ids,
):
added_cond_kwargs = {"text_embeds": text_embeds, "time_ids": time_ids}
for i, (controlnet_cond, conditioning_scale, controlnet) in enumerate(
zip(controlnet_conds, controlnet_scales, self.controlnets)
):
down_samples, mid_sample = controlnet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=controlnet_cond,
conditioning_scale=conditioning_scale,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)
# merge samples
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
noise_pred = self.unet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
return noise_pred
def onnx_export(
model,
model_args: tuple,
output_path: Path,
ordered_input_names,
output_names,
dynamic_axes,
opset,
use_external_data_format=False,
):
output_path.parent.mkdir(parents=True, exist_ok=True)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
with torch.inference_mode(), torch.autocast("cuda"):
if is_torch_less_than_1_11:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
use_external_data_format=use_external_data_format,
enable_onnx_checker=True,
opset_version=opset,
)
else:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
opset_version=opset,
)
@torch.no_grad()
def convert_models(
model_path: str, controlnet_path: list, output_path: str, opset: int, fp16: bool = False, sd_xl: bool = False
):
"""
Function to convert models in stable diffusion controlnet pipeline into ONNX format
Example:
python convert_stable_diffusion_controlnet_to_onnx.py
--model_path danbrown/RevAnimated-v1-2-2
--controlnet_path lllyasviel/control_v11f1e_sd15_tile ioclab/brightness-controlnet
--output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2
--fp16
Example for SD XL:
python convert_stable_diffusion_controlnet_to_onnx.py
--model_path stabilityai/stable-diffusion-xl-base-1.0
--controlnet_path SargeZT/sdxl-controlnet-seg
--output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0
--fp16
--sd_xl
Returns:
        creates 4 ONNX models in the output path:
text_encoder/model.onnx
unet/model.onnx + unet/weights.pb
vae_encoder/model.onnx
vae_decoder/model.onnx
run test script in diffusers/examples/community
python test_onnx_controlnet.py
--sd_model danbrown/RevAnimated-v1-2-2
--onnx_model_dir path-to-models-stable_diffusion/RevAnimated-v1-2-2
--qr_img_path path-to-qr-code-image
"""
dtype = torch.float16 if fp16 else torch.float32
if fp16 and torch.cuda.is_available():
device = "cuda"
elif fp16 and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA")
else:
device = "cpu"
# init controlnet
controlnets = []
for path in controlnet_path:
controlnet = ControlNetModel.from_pretrained(path, torch_dtype=dtype).to(device)
if is_torch_2_0_1:
controlnet.set_attn_processor(AttnProcessor())
controlnets.append(controlnet)
if sd_xl:
if len(controlnets) == 1:
controlnet = controlnets[0]
else:
raise ValueError("MultiControlNet is not yet supported.")
pipeline = StableDiffusionXLControlNetPipeline.from_pretrained(
model_path, controlnet=controlnet, torch_dtype=dtype, variant="fp16", use_safetensors=True
).to(device)
else:
pipeline = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
model_path, controlnet=controlnets, torch_dtype=dtype
).to(device)
output_path = Path(output_path)
if is_torch_2_0_1:
pipeline.unet.set_attn_processor(AttnProcessor())
pipeline.vae.set_attn_processor(AttnProcessor())
# # TEXT ENCODER
num_tokens = pipeline.text_encoder.config.max_position_embeddings
text_hidden_size = pipeline.text_encoder.config.hidden_size
text_input = pipeline.tokenizer(
"A sample prompt",
padding="max_length",
max_length=pipeline.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
onnx_export(
pipeline.text_encoder,
# casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files
model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
output_path=output_path / "text_encoder" / "model.onnx",
ordered_input_names=["input_ids"],
output_names=["last_hidden_state", "pooler_output"],
dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
},
opset=opset,
)
del pipeline.text_encoder
# # UNET
if sd_xl:
controlnets = torch.nn.ModuleList(controlnets)
unet_controlnet = UNet2DConditionXLControlNetModel(pipeline.unet, controlnets)
unet_in_channels = pipeline.unet.config.in_channels
unet_sample_size = pipeline.unet.config.sample_size
text_hidden_size = 2048
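        # the SD VAE downsamples images by a factor of 8, so ControlNet
        # conditioning inputs are pixel-space tensors at 8x the latent size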
img_size = 8 * unet_sample_size
unet_path = output_path / "unet" / "model.onnx"
onnx_export(
unet_controlnet,
model_args=(
torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
torch.tensor([1.0]).to(device=device, dtype=dtype),
torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 2, 3, img_size, img_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 1).to(device=device, dtype=dtype),
torch.randn(2, 1280).to(device=device, dtype=dtype),
torch.rand(2, 6).to(device=device, dtype=dtype),
),
output_path=unet_path,
ordered_input_names=[
"sample",
"timestep",
"encoder_hidden_states",
"controlnet_conds",
"conditioning_scales",
"text_embeds",
"time_ids",
],
output_names=["noise_pred"], # has to be different from "sample" for correct tracing
dynamic_axes={
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B"},
"controlnet_conds": {1: "2B", 3: "8H", 4: "8W"},
"text_embeds": {0: "2B"},
"time_ids": {0: "2B"},
},
opset=opset,
use_external_data_format=True, # UNet is > 2GB, so the weights need to be split
)
unet_model_path = str(unet_path.absolute().as_posix())
unet_dir = os.path.dirname(unet_model_path)
# optimize onnx
shape_inference.infer_shapes_path(unet_model_path, unet_model_path)
unet_opt_graph = optimize(onnx.load(unet_model_path), name="Unet", verbose=True)
# clean up existing tensor files
shutil.rmtree(unet_dir)
os.mkdir(unet_dir)
# collate external tensor files into one
onnx.save_model(
unet_opt_graph,
unet_model_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location="weights.pb",
convert_attribute=False,
)
del pipeline.unet
else:
controlnets = torch.nn.ModuleList(controlnets)
unet_controlnet = UNet2DConditionControlNetModel(pipeline.unet, controlnets)
unet_in_channels = pipeline.unet.config.in_channels
unet_sample_size = pipeline.unet.config.sample_size
img_size = 8 * unet_sample_size
unet_path = output_path / "unet" / "model.onnx"
onnx_export(
unet_controlnet,
model_args=(
torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
torch.tensor([1.0]).to(device=device, dtype=dtype),
torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 2, 3, img_size, img_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 1).to(device=device, dtype=dtype),
),
output_path=unet_path,
ordered_input_names=[
"sample",
"timestep",
"encoder_hidden_states",
"controlnet_conds",
"conditioning_scales",
],
output_names=["noise_pred"], # has to be different from "sample" for correct tracing
dynamic_axes={
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B"},
"controlnet_conds": {1: "2B", 3: "8H", 4: "8W"},
},
opset=opset,
use_external_data_format=True, # UNet is > 2GB, so the weights need to be split
)
unet_model_path = str(unet_path.absolute().as_posix())
unet_dir = os.path.dirname(unet_model_path)
# optimize onnx
shape_inference.infer_shapes_path(unet_model_path, unet_model_path)
unet_opt_graph = optimize(onnx.load(unet_model_path), name="Unet", verbose=True)
# clean up existing tensor files
shutil.rmtree(unet_dir)
os.mkdir(unet_dir)
# collate external tensor files into one
onnx.save_model(
unet_opt_graph,
unet_model_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location="weights.pb",
convert_attribute=False,
)
del pipeline.unet
# VAE ENCODER
vae_encoder = pipeline.vae
vae_in_channels = vae_encoder.config.in_channels
vae_sample_size = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
vae_encoder.forward = lambda sample: vae_encoder.encode(sample).latent_dist.sample()
onnx_export(
vae_encoder,
model_args=(torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),),
output_path=output_path / "vae_encoder" / "model.onnx",
ordered_input_names=["sample"],
output_names=["latent_sample"],
dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
# VAE DECODER
vae_decoder = pipeline.vae
vae_latent_channels = vae_decoder.config.latent_channels
# forward only through the decoder part
vae_decoder.forward = vae_encoder.decode
onnx_export(
vae_decoder,
model_args=(
torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
),
output_path=output_path / "vae_decoder" / "model.onnx",
ordered_input_names=["latent_sample"],
output_names=["sample"],
dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
del pipeline.vae
del pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline")
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument(
"--controlnet_path",
nargs="+",
required=True,
help="Path to the `controlnet` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
args = parser.parse_args()
convert_models(args.model_path, args.controlnet_path, args.output_path, args.opset, args.fp16, args.sd_xl)
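    # Illustrative invocation (the script name and model identifiers below are
    # hypothetical examples; only the flags are defined by the parser above):
    #   python convert_controlnet_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 \
    #       --controlnet_path lllyasviel/sd-controlnet-canny \
    #       --output_path ./sd-controlnet-onnx \
    #       --opset 14 --fp16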
|
8091a41e56688c87aa24f617f20a706daebb812c
|
61004e474b7b2ad0071c16766f0f7874f04f9466
|
/examples/multicluster-dag-dependencies/pattern-2/dag/dags-for-pattern2-extended/workflow_1_pattern2_extended.py
|
2465a2af80e3fff89f819c4aeaea090ce0f5d2f3
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/professional-services
|
eb79751efae765a8c691a745e520f44f51bd715c
|
0f51121b945bd74c7f667e74e8861fceda87565c
|
refs/heads/main
| 2023-09-05T02:57:33.328973
| 2023-08-30T14:40:30
| 2023-08-30T14:40:30
| 91,730,359
| 2,626
| 1,381
|
Apache-2.0
| 2023-09-14T20:13:42
| 2017-05-18T19:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
workflow_1_pattern2_extended.py
|
# Copyright 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils import dates
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': dates.days_ago(2),
'email_on_failure': False,
'email_on_retry': False,
}
with DAG(
'workflow_1_pattern2_extended',
default_args=default_args,
schedule_interval='*/5 * * * *',
catchup=False,
) as dag:
t1 = BashOperator(task_id='task1', bash_command="echo 'Executing task1..'")
t2 = BashOperator(task_id='task2', bash_command="sleep 1s")
t1 >> t2
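    # Further tasks could be chained the same way (illustrative sketch only):
    #   t3 = BashOperator(task_id='task3', bash_command="echo 'Executing task3..'")
    #   t2 >> t3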
|
98d26eb59cfc82549cafd360396a68850bcd0597
|
61b353117076f502c6d41c7932de55468bd51078
|
/tests/test_zookeeper.py
|
dac5cfe7efa28190178707b3bd5885f1ed30901c
|
[
"Apache-2.0"
] |
permissive
|
confluentinc/cp-docker-images
|
20a3727732ad7f0a74357d79eb017af577872953
|
027755d5b25afc4a9ad48dba10108eac8ea56839
|
refs/heads/5.3.3-post
| 2023-08-16T10:38:51.869440
| 2022-11-09T20:25:02
| 2022-11-09T20:25:02
| 61,352,799
| 1,237
| 896
|
Apache-2.0
| 2023-05-02T18:48:59
| 2016-06-17T07:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 17,195
|
py
|
test_zookeeper.py
|
import os
import unittest
import utils
import time
import string
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(CURRENT_DIR, "fixtures", "debian", "zookeeper")
MODE_COMMAND = "bash -c 'dub wait localhost {port} 30 && echo stat | nc localhost {port} | grep Mode'"
HEALTH_CHECK = "bash -c 'cub zk-ready {host}:{port} 30 && echo PASS || echo FAIL'"
JMX_CHECK = """bash -c "\
echo 'get -b org.apache.ZooKeeperService:name0=StandaloneServer_port{client_port} Version' |
java -jar jmxterm-1.0-alpha-4-uber.jar -l {jmx_hostname}:{jmx_port} -n -v silent "
"""
KADMIN_KEYTAB_CREATE = """bash -c \
'kadmin.local -q "addprinc -randkey {principal}/{hostname}@TEST.CONFLUENT.IO" && \
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/{filename}.keytab {principal}/{hostname}@TEST.CONFLUENT.IO"'
"""
class ConfigTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
# Create directories with the correct permissions for test with userid and external volumes.
utils.run_command_on_host(
"mkdir -p /tmp/zk-config-kitchen-sink-test/data /tmp/zk-config-kitchen-sink-test/log")
utils.run_command_on_host(
"chown -R 12345 /tmp/zk-config-kitchen-sink-test/data /tmp/zk-config-kitchen-sink-test/log")
# Copy SSL files.
cls.machine.ssh("mkdir -p /tmp/zookeeper-config-test/secrets")
local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
cls.machine.scp_to_machine(local_secrets_dir, "/tmp/zookeeper-config-test")
cls.cluster = utils.TestCluster("config-test", FIXTURES_DIR, "standalone-config.yml")
cls.cluster.start()
# Create keytabs
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-config", principal="zookeeper", hostname="sasl-config"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-config", principal="zkclient", hostname="sasl-config"))
@classmethod
def tearDownClass(cls):
cls.cluster.shutdown()
utils.run_command_on_host("rm -rf /tmp/zk-config-kitchen-sink-test")
utils.run_command_on_host(" rm -rf /tmp/zookeeper-config-test")
@classmethod
def is_zk_healthy_for_service(cls, service, client_port, host="localhost"):
output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(port=client_port, host=host))
assert "PASS" in output
def test_required_config_failure(self):
self.assertTrue("ZOOKEEPER_CLIENT_PORT is required." in self.cluster.service_logs("failing-config", stopped=True))
self.assertTrue("ZOOKEEPER_SERVER_ID is required." in self.cluster.service_logs("failing-config-server-id", stopped=True))
def test_default_config(self):
self.is_zk_healthy_for_service("default-config", 2181)
zk_props = self.cluster.run_command_on_service("default-config", "cat /etc/kafka/zookeeper.properties")
expected = """clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
"""
self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
def test_default_logging_config(self):
self.is_zk_healthy_for_service("default-config", 2181)
log4j_props = self.cluster.run_command_on_service("default-config", "cat /etc/kafka/log4j.properties")
expected_log4j_props = """log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
"""
self.assertEquals(log4j_props.translate(None, string.whitespace), expected_log4j_props.translate(None, string.whitespace))
tools_log4j_props = self.cluster.run_command_on_service("default-config", "cat /etc/kafka/tools-log4j.properties")
expected_tools_log4j_props = """log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
"""
self.assertEquals(tools_log4j_props.translate(None, string.whitespace), expected_tools_log4j_props.translate(None, string.whitespace))
def test_full_config(self):
self.is_zk_healthy_for_service("full-config", 22181)
zk_props = self.cluster.run_command_on_service("full-config", "cat /etc/kafka/zookeeper.properties")
expected = """clientPort=22181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
initLimit=25
autopurge.purgeInterval=2
syncLimit=20
autopurge.snapRetainCount=4
tickTime=5555
quorumListenOnAllIPs=false
"""
self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
zk_id = self.cluster.run_command_on_service("full-config", "cat /var/lib/zookeeper/data/myid")
self.assertEquals(zk_id, "1")
def test_full_logging_config(self):
self.is_zk_healthy_for_service("full-config", 22181)
log4j_props = self.cluster.run_command_on_service("full-config", "cat /etc/kafka/log4j.properties")
expected_log4j_props = """log4j.rootLogger=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.zookeeper.foo.bar=DEBUG, stdout
"""
self.assertEquals(log4j_props.translate(None, string.whitespace), expected_log4j_props.translate(None, string.whitespace))
tools_log4j_props = self.cluster.run_command_on_service("full-config", "cat /etc/kafka/tools-log4j.properties")
expected_tools_log4j_props = """log4j.rootLogger=ERROR, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
"""
self.assertEquals(tools_log4j_props.translate(None, string.whitespace), expected_tools_log4j_props.translate(None, string.whitespace))
def test_volumes(self):
self.is_zk_healthy_for_service("external-volumes", 2181)
def test_sasl_config(self):
self.is_zk_healthy_for_service("sasl-config", 52181, "sasl-config")
def test_random_user(self):
self.is_zk_healthy_for_service("random-user", 2181)
def test_kitchen_sink(self):
self.is_zk_healthy_for_service("kitchen-sink", 22181)
zk_props = self.cluster.run_command_on_service("kitchen-sink", "cat /etc/kafka/zookeeper.properties")
expected = """clientPort=22181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
initLimit=25
syncLimit=20
tickTime=5555
quorumListenOnAllIPs=false
"""
        self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
        zk_id = self.cluster.run_command_on_service("full-config", "cat /var/lib/zookeeper/data/myid")
        self.assertEquals(zk_id, "1")
class StandaloneNetworkingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cluster = utils.TestCluster("standalone-network-test", FIXTURES_DIR, "standalone-network.yml")
cls.cluster.start()
@classmethod
def tearDownClass(cls):
cls.cluster.shutdown()
@classmethod
def is_zk_healthy_for_service(cls, service, client_port, host="localhost"):
output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(port=client_port, host=host))
assert "PASS" in output
def test_bridge_network(self):
# Test from within the container
self.is_zk_healthy_for_service("bridge-network", 2181)
# Test from outside the container
logs = utils.run_docker_command(
image="confluentinc/cp-zookeeper",
command=HEALTH_CHECK.format(port=22181, host="localhost"),
host_config={'NetworkMode': 'host'})
self.assertTrue("PASS" in logs)
def test_host_network(self):
# Test from within the container
self.is_zk_healthy_for_service("host-network", 32181)
# Test from outside the container
logs = utils.run_docker_command(
image="confluentinc/cp-zookeeper",
command=HEALTH_CHECK.format(port=32181, host="localhost"),
host_config={'NetworkMode': 'host'})
self.assertTrue("PASS" in logs)
def test_jmx_host_network(self):
# Test from outside the container
logs = utils.run_docker_command(
image="confluentinc/cp-jmxterm",
command=JMX_CHECK.format(client_port=52181, jmx_hostname="localhost", jmx_port="39999"),
host_config={'NetworkMode': 'host'})
self.assertTrue("Version = 3.4.9-1757313, built on 08/23/2016 06:50 GMT;" in logs)
def test_jmx_bridged_network(self):
# Test from outside the container
logs = utils.run_docker_command(
image="confluentinc/cp-jmxterm",
command=JMX_CHECK.format(client_port=2181, jmx_hostname="bridge-network-jmx", jmx_port="9999"),
host_config={'NetworkMode': 'standalone-network-test_zk'})
self.assertTrue("Version = 3.4.9-1757313, built on 08/23/2016 06:50 GMT;" in logs)
class ClusterBridgeNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
# Copy SSL files.
cls.machine.ssh("mkdir -p /tmp/zookeeper-bridged-test/secrets")
local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
cls.machine.scp_to_machine(local_secrets_dir, "/tmp/zookeeper-bridged-test")
cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-bridged.yml")
cls.cluster.start()
# Create keytabs
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-bridged-1", principal="zookeeper", hostname="zookeeper-sasl-1"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-bridged-2", principal="zookeeper", hostname="zookeeper-sasl-2"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-bridged-3", principal="zookeeper", hostname="zookeeper-sasl-3"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-bridged-1", principal="zkclient", hostname="zookeeper-sasl-1"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-bridged-2", principal="zkclient", hostname="zookeeper-sasl-2"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-bridged-3", principal="zkclient", hostname="zookeeper-sasl-3"))
@classmethod
def tearDownClass(cls):
cls.cluster.shutdown()
utils.run_command_on_host("rm -rf /tmp/zookeeper-bridged-test")
def test_cluster_running(self):
self.assertTrue(self.cluster.is_running())
@classmethod
def is_zk_healthy_for_service(cls, service, client_port, host="localhost"):
output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(port=client_port, host=host))
assert "PASS" in output
def test_zookeeper_on_service(self):
self.is_zk_healthy_for_service("zookeeper-1", 2181, "zookeeper-1")
self.is_zk_healthy_for_service("zookeeper-1", 2181, "zookeeper-2")
self.is_zk_healthy_for_service("zookeeper-1", 2181, "zookeeper-3")
client_ports = [22181, 32181, 42181]
expected = sorted(["Mode: follower\n", "Mode: follower\n", "Mode: leader\n"])
outputs = []
for port in client_ports:
output = utils.run_docker_command(
image="confluentinc/cp-zookeeper",
command=MODE_COMMAND.format(port=port),
host_config={'NetworkMode': 'host'})
outputs.append(output)
self.assertEquals(sorted(outputs), expected)
def test_sasl_on_service(self):
self.is_zk_healthy_for_service("zookeeper-sasl-1", 2181, "zookeeper-sasl-1")
self.is_zk_healthy_for_service("zookeeper-sasl-2", 2181, "zookeeper-sasl-2")
self.is_zk_healthy_for_service("zookeeper-sasl-3", 2181, "zookeeper-sasl-3")
        # Trying to connect from one container to another does not work because
        # the zk code resolves the DNS name to the internal docker container
        # name, which causes the Kerberos authentication to fail.
# Connect to zookeeper-sasl-2 & zookeeper-sasl-3 from zookeeper-sasl-1
# self.is_zk_healthy_for_service("zookeeper-sasl-1", 2181, "zookeeper-sasl-2")
# self.is_zk_healthy_for_service("zookeeper-sasl-1", 2181, "zookeeper-sasl-3")
class ClusterHostNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
        # Add a hostname mapped to eth0; required for SASL to work predictably.
        # localhost and the hostname both resolve to 127.0.0.1 in the docker
        # image, so using localhost causes unpredictable behaviour with zkclient.
cmd = """
"sudo sh -c 'grep sasl.kafka.com /etc/hosts || echo {IP} sasl.kafka.com >> /etc/hosts'"
""".strip()
cls.machine.ssh(cmd.format(IP=cls.machine.get_internal_ip().strip()))
# Copy SSL files.
cls.machine.ssh("mkdir -p /tmp/zookeeper-host-test/secrets")
local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
cls.machine.scp_to_machine(local_secrets_dir, "/tmp/zookeeper-host-test")
cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-host.yml")
cls.cluster.start()
# Create keytabs
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-1", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-2", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-3", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-1", principal="zkclient", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-2", principal="zkclient", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-3", principal="zkclient", hostname="sasl.kafka.com"))
@classmethod
def tearDownClass(cls):
cls.cluster.shutdown()
utils.run_command_on_host("rm -rf /tmp/zookeeper-host-test")
def test_cluster_running(self):
self.assertTrue(self.cluster.is_running())
@classmethod
def is_zk_healthy_for_service(cls, service, client_port, host="sasl.kafka.com"):
output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(port=client_port, host=host))
assert "PASS" in output
def test_zookeeper_on_service(self):
self.is_zk_healthy_for_service("zookeeper-1", 22182)
self.is_zk_healthy_for_service("zookeeper-1", 32182)
self.is_zk_healthy_for_service("zookeeper-1", 42182)
client_ports = [22182, 32182, 42182]
expected = sorted(["Mode: follower\n", "Mode: follower\n", "Mode: leader\n"])
outputs = []
for port in client_ports:
output = utils.run_docker_command(
image="confluentinc/cp-zookeeper",
command=MODE_COMMAND.format(port=port),
host_config={'NetworkMode': 'host'})
outputs.append(output)
self.assertEquals(sorted(outputs), expected)
def test_sasl_on_service(self):
self.is_zk_healthy_for_service("zookeeper-sasl-1", 22182)
self.is_zk_healthy_for_service("zookeeper-sasl-1", 32182)
self.is_zk_healthy_for_service("zookeeper-sasl-1", 42182)
|
033313f2d3373c08ac35a6033c793c00af953a2c
|
89f9da6c0bb99b654f6cb06073fe38f1de2af658
|
/contrib/populate-fixture-data.py
|
774a382aa410c926ac85f57f9629c45f2e061783
|
[
"Apache-2.0"
] |
permissive
|
GerritCodeReview/gerrit
|
01449252ef9b8ee519ab33661cec1229cce1f92d
|
19f3f45ee1c6c245070563529889cb511bcd4b99
|
refs/heads/master
| 2023-08-10T14:01:21.811497
| 2023-08-10T10:57:39
| 2023-08-10T10:58:02
| 47,751,755
| 858
| 210
|
Apache-2.0
| 2023-05-12T18:09:50
| 2015-12-10T09:32:48
|
Java
|
UTF-8
|
Python
| false
| false
| 13,802
|
py
|
populate-fixture-data.py
|
#!/usr/bin/env python3
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will populate an empty standard Gerrit instance with some
data for local testing.
TODO(hiesel): Make real git commits instead of empty changes
TODO(hiesel): Add comments
"""
from __future__ import print_function
import argparse
import atexit
import json
import os
import random
import shutil
import subprocess
import tempfile
import requests
import requests.auth
DEFAULT_TMP_PATH = "/tmp"
TMP_PATH = ""
BASE_URL = "http://localhost:%d/a/"
ADMIN_BASIC_AUTH = requests.auth.HTTPBasicAuth("admin", "secret")
# GROUP_ADMIN stores a GroupInfo for the admin group (see the Gerrit REST docs).
# In addition, GROUP_ADMIN["name"] stores the admin group's name.
GROUP_ADMIN = {}
HEADERS = {"Content-Type": "application/json", "charset": "UTF-8"}
# Random names from US Census Data
FIRST_NAMES = [
"Casey", "Yesenia", "Shirley", "Tara", "Wanda", "Sheryl", "Jaime",
"Elaine", "Charlotte", "Carly", "Bonnie", "Kirsten", "Kathryn", "Carla",
"Katrina", "Melody", "Suzanne", "Sandy", "Joann", "Kristie", "Sally",
"Emma", "Susan", "Amanda", "Alyssa", "Patty", "Angie", "Dominique",
"Cynthia", "Jennifer", "Theresa", "Desiree", "Kaylee", "Maureen",
"Jeanne", "Kellie", "Valerie", "Nina", "Judy", "Diamond", "Anita",
"Rebekah", "Stefanie", "Kendra", "Erin", "Tammie", "Tracey", "Bridget",
"Krystal", "Jasmin", "Sonia", "Meghan", "Rebecca", "Jeanette", "Meredith",
"Beverly", "Natasha", "Chloe", "Selena", "Teresa", "Sheena", "Cassandra",
"Rhonda", "Tami", "Jodi", "Shelly", "Angela", "Kimberly", "Terry",
"Joanna", "Isabella", "Lindsey", "Loretta", "Dana", "Veronica", "Carolyn",
"Laura", "Karen", "Dawn", "Alejandra", "Cassie", "Lorraine", "Yolanda",
"Kerry", "Stephanie", "Caitlin", "Melanie", "Kerri", "Doris", "Sandra",
"Beth", "Carol", "Vicki", "Shelia", "Bethany", "Rachael", "Donna",
"Alexandra", "Barbara", "Ana", "Jillian", "Ann", "Rachel", "Lauren",
"Hayley", "Misty", "Brianna", "Tanya", "Danielle", "Courtney",
"Jacqueline", "Becky", "Christy", "Alisha", "Phyllis", "Faith", "Jocelyn",
"Nancy", "Gloria", "Kristen", "Evelyn", "Julie", "Julia", "Kara",
"Chelsey", "Cassidy", "Jean", "Chelsea", "Jenny", "Diana", "Haley",
"Kristine", "Kristina", "Erika", "Jenna", "Alison", "Deanna", "Abigail",
"Melissa", "Sierra", "Linda", "Monica", "Tasha", "Traci", "Yvonne",
"Tracy", "Marie", "Maria", "Michaela", "Stacie", "April", "Morgan",
"Cathy", "Darlene", "Cristina", "Emily" "Ian", "Russell", "Phillip", "Jay",
"Barry", "Brad", "Frederick", "Fernando", "Timothy", "Ricardo", "Bernard",
"Daniel", "Ruben", "Alexis", "Kyle", "Malik", "Norman", "Kent", "Melvin",
"Stephen", "Daryl", "Kurt", "Greg", "Alex", "Mario", "Riley", "Marvin",
"Dan", "Steven", "Roberto", "Lucas", "Leroy", "Preston", "Drew", "Fred",
"Casey", "Wesley", "Elijah", "Reginald", "Joel", "Christopher", "Jacob",
"Luis", "Philip", "Mark", "Rickey", "Todd", "Scott", "Terrence", "Jim",
"Stanley", "Bobby", "Thomas", "Gabriel", "Tracy", "Marcus", "Peter",
"Michael", "Calvin", "Herbert", "Darryl", "Billy", "Ross", "Dustin",
"Jaime", "Adam", "Henry", "Xavier", "Dominic", "Lonnie", "Danny", "Victor",
"Glen", "Perry", "Jackson", "Grant", "Gerald", "Garrett", "Alejandro",
"Eddie", "Alan", "Ronnie", "Mathew", "Dave", "Wayne", "Joe", "Craig",
"Terry", "Chris", "Randall", "Parker", "Francis", "Keith", "Neil", "Caleb",
"Jon", "Earl", "Taylor", "Bryce", "Brady", "Max", "Sergio", "Leon", "Gene",
"Darin", "Bill", "Edgar", "Antonio", "Dalton", "Arthur", "Austin",
"Cristian", "Kevin", "Omar", "Kelly", "Aaron", "Ethan", "Tom", "Isaac",
"Maurice", "Gilbert", "Hunter", "Willie", "Harry", "Dale", "Darius",
"Jerome", "Jason", "Harold", "Kerry", "Clarence", "Gregg", "Shane",
"Eduardo", "Micheal", "Howard", "Vernon", "Rodney", "Anthony", "Levi",
"Larry", "Franklin", "Jimmy", "Jonathon", "Carl",
]
LAST_NAMES = [
"Savage", "Hendrix", "Moon", "Larsen", "Rocha", "Burgess", "Bailey",
"Farley", "Moses", "Schmidt", "Brown", "Hoover", "Klein", "Jennings",
"Braun", "Rangel", "Casey", "Dougherty", "Hancock", "Wolf", "Henry",
"Thomas", "Bentley", "Barnett", "Kline", "Pitts", "Rojas", "Sosa", "Paul",
"Hess", "Chase", "Mckay", "Bender", "Colins", "Montoya", "Townsend",
"Potts", "Ayala", "Avery", "Sherman", "Tapia", "Hamilton", "Ferguson",
"Huang", "Hooper", "Zamora", "Logan", "Lloyd", "Quinn", "Monroe", "Brock",
"Ibarra", "Fowler", "Weiss", "Montgomery", "Diaz", "Dixon", "Olson",
"Robertson", "Arias", "Benjamin", "Abbott", "Stein", "Schroeder", "Beck",
"Velasquez", "Barber", "Nichols", "Ortiz", "Burns", "Moody", "Stokes",
"Wilcox", "Rush", "Michael", "Kidd", "Rowland", "Mclean", "Saunders",
"Chung", "Newton", "Potter", "Hickman", "Ray", "Larson", "Figueroa",
"Duncan", "Sparks", "Rose", "Hodge", "Huynh", "Joseph", "Morales",
"Beasley", "Mora", "Fry", "Ross", "Novak", "Hahn", "Wise", "Knight",
"Frederick", "Heath", "Pollard", "Vega", "Mcclain", "Buckley", "Conrad",
"Cantrell", "Bond", "Mejia", "Wang", "Lewis", "Johns", "Mcknight",
"Callahan", "Reynolds", "Norris", "Burnett", "Carey", "Jacobson", "Oneill",
"Oconnor", "Leonard", "Mckenzie", "Hale", "Delgado", "Spence", "Brandt",
"Obrien", "Bowman", "James", "Avila", "Roberts", "Barker", "Cohen",
"Bradley", "Prince", "Warren", "Summers", "Little", "Caldwell", "Garrett",
"Hughes", "Norton", "Burke", "Holden", "Merritt", "Lee", "Frank", "Wiley",
"Ho", "Weber", "Keith", "Winters", "Gray", "Watts", "Brady", "Aguilar",
"Nicholson", "David", "Pace", "Cervantes", "Davis", "Baxter", "Sanchez",
"Singleton", "Taylor", "Strickland", "Glenn", "Valentine", "Roy",
"Cameron", "Beard", "Norman", "Fritz", "Anthony", "Koch", "Parrish",
"Herman", "Hines", "Sutton", "Gallegos", "Stephenson", "Lozano",
"Franklin", "Howe", "Bauer", "Love", "Ali", "Ellison", "Lester", "Guzman",
"Jarvis", "Espinoza", "Fletcher", "Burton", "Woodard", "Peterson",
"Barajas", "Richard", "Bryan", "Goodman", "Cline", "Rowe", "Faulkner",
"Crawford", "Mueller", "Patterson", "Hull", "Walton", "Wu", "Flores",
"York", "Dickson", "Barnes", "Fisher", "Strong", "Juarez", "Fitzgerald",
"Schmitt", "Blevins", "Villa", "Sullivan", "Velazquez", "Horton",
"Meadows", "Riley", "Barrera", "Neal", "Mendez", "Mcdonald", "Floyd",
"Lynch", "Mcdowell", "Benson", "Hebert", "Livingston", "Davies",
"Richardson", "Vincent", "Davenport", "Osborn", "Mckee", "Marshall",
"Ferrell", "Martinez", "Melton", "Mercer", "Yoder", "Jacobs", "Mcdaniel",
"Mcmillan", "Peters", "Atkinson", "Wood", "Briggs", "Valencia", "Chandler",
"Rios", "Hunter", "Bean", "Hicks", "Hays", "Lucero", "Malone", "Waller",
"Banks", "Myers", "Mitchell", "Grimes", "Houston", "Hampton", "Trujillo",
"Perkins", "Moran", "Welch", "Contreras", "Montes", "Ayers", "Hayden",
"Daniel", "Weeks", "Porter", "Gill", "Mullen", "Nolan", "Dorsey", "Crane",
"Estes", "Lam", "Wells", "Cisneros", "Giles", "Watson", "Vang", "Scott",
"Knox", "Hanna", "Fields",
]
def clean(json_string):
# Strip JSON XSS Tag
json_string = json_string.strip()
if json_string.startswith(")]}'"):
return json_string[5:]
return json_string
def basic_auth(user):
return requests.auth.HTTPBasicAuth(user["username"], user["http_password"])
def fetch_admin_group():
global GROUP_ADMIN
# Get admin group
r = json.loads(clean(requests.get(
BASE_URL + "groups/?suggest=ad&p=All-Projects",
headers=HEADERS,
auth=ADMIN_BASIC_AUTH).text))
admin_group_name = list(r.keys())[0]
GROUP_ADMIN = r[admin_group_name]
GROUP_ADMIN["name"] = admin_group_name
def generate_random_text():
    words = "lorem ipsum doleret delendam \n esse".split(" ")
    return " ".join(random.choice(words) for _ in range(1, 100))
def set_up():
global TMP_PATH
TMP_PATH = tempfile.mkdtemp()
atexit.register(clean_up)
os.makedirs(TMP_PATH + "/ssh")
os.makedirs(TMP_PATH + "/repos")
fetch_admin_group()
def get_random_users(num_users):
users = random.sample([(f, l) for f in FIRST_NAMES for l in LAST_NAMES],
num_users)
names = []
for u in users:
names.append({"firstname": u[0],
"lastname": u[1],
"name": u[0] + " " + u[1],
"username": u[0] + u[1],
"email": u[0] + "." + u[1] + "@gerritcodereview.com",
"http_password": "secret",
"groups": []})
return names
def generate_ssh_keys(gerrit_users):
for user in gerrit_users:
key_file = TMP_PATH + "/ssh/" + user["username"] + ".key"
subprocess.check_output(["ssh-keygen", "-f", key_file, "-N", ""])
with open(key_file + ".pub", "r") as f:
user["ssh_key"] = f.read()
def create_gerrit_groups():
groups = [
{"name": "iOS-Maintainers", "description": "iOS Maintainers",
"visible_to_all": True, "owner": GROUP_ADMIN["name"],
"owner_id": GROUP_ADMIN["id"]},
{"name": "Android-Maintainers", "description": "Android Maintainers",
"visible_to_all": True, "owner": GROUP_ADMIN["name"],
"owner_id": GROUP_ADMIN["id"]},
{"name": "Backend-Maintainers", "description": "Backend Maintainers",
"visible_to_all": True, "owner": GROUP_ADMIN["name"],
"owner_id": GROUP_ADMIN["id"]},
{"name": "Script-Maintainers", "description": "Script Maintainers",
"visible_to_all": True, "owner": GROUP_ADMIN["name"],
"owner_id": GROUP_ADMIN["id"]},
{"name": "Security-Team", "description": "Sec Team",
"visible_to_all": False, "owner": GROUP_ADMIN["name"],
"owner_id": GROUP_ADMIN["id"]}]
for g in groups:
requests.put(BASE_URL + "groups/" + g["name"],
json.dumps(g),
headers=HEADERS,
auth=ADMIN_BASIC_AUTH)
return [g["name"] for g in groups]
def create_gerrit_projects(owner_groups):
projects = [
{"id": "android", "name": "Android", "parent": "All-Projects",
"branches": ["master"], "description": "Our android app.",
"owners": [owner_groups[0]], "create_empty_commit": True},
{"id": "ios", "name": "iOS", "parent": "All-Projects",
"branches": ["master"], "description": "Our ios app.",
"owners": [owner_groups[1]], "create_empty_commit": True},
{"id": "backend", "name": "Backend", "parent": "All-Projects",
"branches": ["master"], "description": "Our awesome backend.",
"owners": [owner_groups[2]], "create_empty_commit": True},
{"id": "scripts", "name": "Scripts", "parent": "All-Projects",
"branches": ["master"], "description": "some small scripts.",
"owners": [owner_groups[3]], "create_empty_commit": True}]
for p in projects:
requests.put(BASE_URL + "projects/" + p["name"],
json.dumps(p),
headers=HEADERS,
auth=ADMIN_BASIC_AUTH)
return [p["name"] for p in projects]
def create_gerrit_users(gerrit_users):
for user in gerrit_users:
requests.put(BASE_URL + "accounts/" + user["username"],
json.dumps(user),
headers=HEADERS,
auth=ADMIN_BASIC_AUTH)
def create_change(user, project_name):
random_commit_message = generate_random_text()
change = {
"project": project_name,
"subject": random_commit_message.split("\n")[0],
"branch": "master",
"status": "NEW",
}
requests.post(BASE_URL + "changes/",
json.dumps(change),
headers=HEADERS,
auth=basic_auth(user))
def clean_up():
shutil.rmtree(TMP_PATH)
def main():
p = argparse.ArgumentParser()
p.add_argument("-u", "--user_count", action="store",
default=100,
type=int,
help="number of users to generate")
p.add_argument("-p", "--port", action="store",
default=8080,
type=int,
help="port of server")
args = p.parse_args()
global BASE_URL
BASE_URL = BASE_URL % args.port
print(BASE_URL)
set_up()
gerrit_users = get_random_users(args.user_count)
group_names = create_gerrit_groups()
for idx, u in enumerate(gerrit_users):
u["groups"].append(group_names[idx % len(group_names)])
if idx % 5 == 0:
# Also add to security group
u["groups"].append(group_names[4])
generate_ssh_keys(gerrit_users)
create_gerrit_users(gerrit_users)
project_names = create_gerrit_projects(group_names)
for idx, u in enumerate(gerrit_users):
for _ in range(random.randint(1, 5)):
create_change(u, project_names[4 * idx // len(gerrit_users)])
if __name__ == "__main__":
    main()
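# Illustrative invocation against a local test site (flags defined by the
# parser above; values are examples only):
#   ./populate-fixture-data.py --user_count 50 --port 8080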
|
4e265ca524541f53815b524d7fefec98a3fa8785
|
885d3e4017d96ed9fd56545d95ad63895e6dc01d
|
/rootpy/utils/path.py
|
d95929d072911a1bc0dff1c43593eaeeffc5f5d9
|
[
"BSD-3-Clause"
] |
permissive
|
rootpy/rootpy
|
c3eb7f70d29e4779a0bda8356fb96922bb95537f
|
3926935e1f2100d8ba68070c2ab44055d4800f73
|
refs/heads/master
| 2021-01-17T04:08:51.330059
| 2019-01-05T17:05:50
| 2019-01-05T17:05:50
| 3,276,014
| 159
| 60
|
BSD-3-Clause
| 2019-12-08T12:35:08
| 2012-01-26T18:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 923
|
py
|
path.py
|
from __future__ import absolute_import
import glob
import os
import errno
__all__ = [
'expand',
'expand_and_glob',
'expand_and_glob_all',
'mkdir_p',
]
def expand(s):
return os.path.expanduser(os.path.expandvars(s))
def expand_and_glob(s):
return glob.glob(expand(s))
def expand_and_glob_all(s):
files = []
for name in s:
files += expand_and_glob(name)
return files
def mkdir_p(path):
"""
mkdir -p functionality
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
In rootpy, this function should be used when creating directories in a
multithreaded environment to avoid race conditions when checking if a
directory exists before creating it.
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
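# Illustrative usage (paths are hypothetical):
#   mkdir_p("/tmp/analysis/plots")        # no error if the directory exists
#   files = expand_and_glob("~/data/*.root")
#   all_files = expand_and_glob_all(["~/data/*.root", "$SCRATCH/more/*.root"])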
|
9d43c6580fe8af7a5570074059915038e7195bed
|
faa390890e17219fd763bd66e66bb6753c692b14
|
/jaclearn/rl/env.py
|
321b7fc9240263c0d8454c51795b8cda2ba49ab7
|
[
"MIT"
] |
permissive
|
vacancy/Jacinle
|
7170b1c798e4a903186abe74d28e4a7e034ec766
|
20021790fd32ef1ad40c67fba7582c6db54235da
|
refs/heads/master
| 2023-07-20T03:54:46.693649
| 2023-07-12T21:00:10
| 2023-07-12T21:00:10
| 117,910,172
| 135
| 275
|
MIT
| 2023-01-18T17:41:33
| 2018-01-18T00:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,281
|
py
|
env.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : env.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 02/17/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import collections
from jacinle.utils.cache import cached_property
__all__ = ['RLEnvBase', 'SimpleRLEnvBase', 'ProxyRLEnvBase']
class RLEnvBase(object):
def __init__(self):
self._stats = collections.defaultdict(list)
@property
def stats(self):
return self._stats
def append_stat(self, name, value):
self._stats[name].append(value)
return self
def clear_stats(self):
self._stats = collections.defaultdict(list)
return self
@cached_property
def action_space(self):
return self._get_action_space()
@property
def current_state(self):
return self._get_current_state()
def action(self, action):
return self._action(action)
def restart(self, *args, **kwargs):
return self._restart(*args, **kwargs)
def finish(self, *args, **kwargs):
return self._finish(*args, **kwargs)
def play_one_episode(self, func, ret_states=False, ret_actions=False, restart_kwargs=None, finish_kwargs=None, max_steps=10000):
states = []
actions = []
self.restart(**(restart_kwargs or {}))
for step in range(max_steps):
state = self.current_state
action = func(state)
r, is_over = self.action(action)
if ret_actions:
actions.append(action)
if ret_states:
states.append(state)
if is_over:
self.finish(**(finish_kwargs or {}))
break
if ret_states:
states.append(self.current_state)
returns = []
if ret_states:
returns.append(states)
if ret_actions:
returns.append(actions)
return returns[0] if len(returns) == 1 else tuple(returns)
def _get_action_space(self):
return None
def _get_current_state(self):
return None
def _action(self, action):
raise NotImplementedError()
def _restart(self, *args, **kwargs):
raise NotImplementedError()
def _finish(self, *args, **kwargs):
pass
@property
def unwrapped(self):
return self
def evaluate_one_episode(self, func):
self.play_one_episode(func)
score = self.stats['score'][-1]
self.clear_stats()
return score
class SimpleRLEnvBase(RLEnvBase):
_current_state = None
def __init__(self):
super().__init__()
self._reward_history = []
def _get_current_state(self):
return self._current_state
def _set_current_state(self, state):
self._current_state = state
def action(self, action):
r, is_over = self._action(action)
self._reward_history.append(r)
return r, is_over
def restart(self, *args, **kwargs):
rc = self._restart(*args, **kwargs)
self._reward_history = []
return rc
def finish(self, *args, **kwargs):
rc = self._finish(*args, **kwargs)
self.append_stat('score', sum(self._reward_history))
self.append_stat('length', len(self._reward_history))
return rc
class ProxyRLEnvBase(RLEnvBase):
def __init__(self, other):
super().__init__()
self.__proxy = other
@property
def proxy(self):
return self.__proxy
@property
def stats(self):
return self.__proxy.stats
def append_stat(self, name, value):
        self.__proxy.append_stat(name, value)
return self
def clear_stats(self):
self.__proxy.clear_stats()
return self
def _get_action_space(self):
return self.__proxy.action_space
def _get_current_state(self):
return self.__proxy.current_state
def _action(self, action):
return self.__proxy.action(action)
def _restart(self, *args, **kwargs):
return self.__proxy.restart(*args, **kwargs)
def _finish(self, *args, **kwargs):
return self.__proxy.finish(*args, **kwargs)
@property
def unwrapped(self):
return self.proxy.unwrapped
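# Minimal sketch of a concrete environment built on SimpleRLEnvBase
# (hypothetical; for illustration only, not part of the public API).
# A subclass only needs _restart/_action plus _set_current_state; the
# score and length stats are then tracked automatically by finish().
class _ToyCountdownEnv(SimpleRLEnvBase):
    """Toy env: reward 1 per step, episode ends after 3 steps."""

    def _restart(self):
        self._steps = 0
        self._set_current_state(self._steps)

    def _action(self, action):
        self._steps += 1
        self._set_current_state(self._steps)
        return 1, self._steps >= 3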
|
0383c575dde847f3bbd59fbad8772834cadc578b
|
23a865de75c38f1208f2e6e990475082f35cd407
|
/nssmf/routers.py
|
779811feb08870edd8238133a1e96a16d3f19c9f
|
[
"Apache-2.0"
] |
permissive
|
free5gmano/free5gmano
|
c2a19dc9016750570e5deff46267f4cc426eebab
|
4e2f72a4584a3c1a4bc31eac0952217ceff06a3f
|
refs/heads/master
| 2023-05-25T17:08:01.960558
| 2023-03-31T05:58:47
| 2023-03-31T05:58:47
| 232,234,634
| 107
| 99
|
Apache-2.0
| 2023-05-22T22:17:49
| 2020-01-07T03:26:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
routers.py
|
from rest_framework.routers import Route, DynamicRoute, SimpleRouter
class CustomReadOnlyRouter(SimpleRouter):
routes = [
# List route.
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
detail=False,
initkwargs={'suffix': 'List'}
),
# Dynamically generated list routes. Generated using
# @action(detail=False) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=False,
initkwargs={}
),
# Detail route.
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'upload',
'patch': 'update',
'delete': 'destroy'
},
name='{basename}-detail',
detail=True,
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes. Generated using
# @action(detail=True) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{lookup}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=True,
initkwargs={}
),
]
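# Illustrative registration (the viewset name is hypothetical):
#   router = CustomReadOnlyRouter()
#   router.register(r'slices', SliceViewSet, basename='slice')
#   urlpatterns = router.urls
# Note that PUT maps to the viewset's `upload` action rather than `update`.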
|
b35ab4b95bd8ce40391bfa86fe627874e724964f
|
f594560136416be39c32d5ad24dc976aa2cf3674
|
/mmdet/apis/inference.py
|
795fce518c5cf2bcf73c8e84ab40ff0dfea8005c
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ShiqiYu/libfacedetection.train
|
bd9eb472c2599cbcb2f028fe7b51294e76868432
|
dce01651d44d2880bcbf4e296ad5ef383a5a611e
|
refs/heads/master
| 2023-07-14T02:37:02.517740
| 2023-06-12T07:42:00
| 2023-06-12T07:42:00
| 245,094,849
| 732
| 206
|
Apache-2.0
| 2023-06-12T07:42:01
| 2020-03-05T07:19:23
|
Python
|
UTF-8
|
Python
| false
| false
| 8,485
|
py
|
inference.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
"""Initialize a detector from config file.
Args:
config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,
:obj:`Path`, or the config object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
cfg_options (dict): Options to override some settings in the used
config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, (str, Path)):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
if 'pretrained' in config.model:
config.model.pretrained = None
elif 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
config.model.train_cfg = None
model = build_detector(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage:
"""Deprecated.
A simple pipeline to load image.
"""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
warnings.simplefilter('once')
warnings.warn('`LoadImage` is deprecated and will be removed in '
'future releases. You may use `LoadImageFromWebcam` '
'from `mmdet.datasets.pipelines.` instead.')
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
Either image files or loaded images.
Returns:
If imgs is a list or tuple, the same length list type results
will be returned, otherwise return the detection results directly.
"""
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# forward the model
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
if not is_batch:
return results[0]
else:
return results
async def async_inference_detector(model, imgs):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        imgs (str | ndarray or list[str/ndarray]): Either image files or loaded images.
Returns:
Awaitable detection results.
"""
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
results = await model.aforward_test(rescale=True, **data)
return results
def show_result_pyplot(model,
img,
result,
score_thr=0.3,
title='result',
wait_time=0,
palette=None,
out_file=None):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
title (str): Title of the pyplot figure.
wait_time (float): Value of waitKey param. Default: 0.
palette (str or tuple(int) or :obj:`Color`): Color.
The tuple of color should be in BGR order.
out_file (str or None): The path to write the image.
Default: None.
"""
if hasattr(model, 'module'):
model = model.module
model.show_result(
img,
result,
score_thr=score_thr,
show=True,
wait_time=wait_time,
win_name=title,
bbox_color=palette,
text_color=(200, 200, 200),
mask_color=palette,
out_file=out_file)
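# Illustrative end-to-end usage (config/checkpoint paths are hypothetical):
#   model = init_detector('configs/example_config.py', 'weights/example.pth',
#                         device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')
#   show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)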
|
4739d36f5cab0f83cb0b1b1653b2ef6bd90be639
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/algorithms/migrations/0033_job_attempt.py
|
f4cae94405e2d44f4d342de3c26f10cdf11066e5
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
0033_job_attempt.py
|
# Generated by Django 3.2.14 on 2022-07-05 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("algorithms", "0032_alter_job_time_limit"),
]
operations = [
migrations.AddField(
model_name="job",
name="attempt",
field=models.PositiveSmallIntegerField(default=0, editable=False),
),
]
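# Illustrative application of this migration (standard Django workflow):
#   python manage.py migrate algorithms 0033
# adds the non-editable `attempt` counter column, defaulting to 0.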
|
95b7eb6b2d99001b3fd1709e674ef85edb9ad96b
|
bb90ad20468f9fe2039b8c16858bd8eae8bbc050
|
/doc/generate_config_rst.py
|
ef7fe6220d2c3e474562b78f8ac293dd3118968b
|
[
"Apache-2.0"
] |
permissive
|
microsoft/CCF
|
0997fd81a924d36d775b219720b26b4ff196b18a
|
2fbf87840b9e8334c141f4a9c9b25aae979b0540
|
refs/heads/main
| 2023-09-05T15:39:37.265089
| 2023-09-05T15:27:25
| 2023-09-05T15:27:25
| 180,112,558
| 687
| 229
|
Apache-2.0
| 2023-09-14T14:28:39
| 2019-04-08T09:13:04
|
C++
|
UTF-8
|
Python
| false
| false
| 5,476
|
py
|
generate_config_rst.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import sys
import json
import tempfile
import filecmp
# Generated document is included in existing page, so
# start at heading of depth 1 (equivalent to markdown h2.)
START_DEPTH = 1
class MinimalRstGenerator:
def __init__(self):
self._lines = [".."]
self._lines.append(" This is an auto-generated file. DO NOT EDIT.\n")
def _add_lines(self, lines):
self._lines.extend(lines)
self._lines.append("\n")
def add_heading(self, text, depth):
depth_to_char = {0: "=", 1: "-", 2: "~", 3: "+"}
self._add_lines([text, depth_to_char[depth] * len(text)])
def add_line(self, text):
self._add_lines([text])
def render(self):
return "\n".join(self._lines)
def print_attributes(entry):
def stringify_output(s):
return f"``{json.dumps(s)}``"
desc = ""
if "description" in entry:
desc += entry["description"]
if "enum" in entry:
desc += f'. (values: {", ".join(stringify_output(s) for s in entry["enum"])})'
if "default" in entry:
desc += f'. Default: {stringify_output(entry["default"])}'
if "minimum" in entry:
desc += f'. Minimum: {stringify_output(entry["minimum"])}'
return desc
def print_entry(output, entry, name, required=False, depth=0):
desc = ""
if depth == START_DEPTH:
output.add_heading(f"``{name}``", START_DEPTH)
else:
desc += f"- ``{name}``: "
desc += print_attributes(entry)
if required:
desc += ". Required"
output.add_line(f"{desc}.")
def has_subobjs(obj):
if not isinstance(obj, dict):
return False
return any(
k in ["properties", "additionalProperties", "items"] for k in obj.keys()
) and ("items" not in obj or obj["items"]["type"] == "object")
def print_object(output, obj, depth=0, required_entries=None, additional_desc=None):
required_entries = required_entries or []
for k, v in obj.items():
if has_subobjs(v):
output.add_heading(f"``{k}``", depth)
if "description" in v:
output.add_line(
f'{"**Required.** " if k in required_entries else ""}{v["description"]}.'
)
if additional_desc is not None:
output.add_line(f"Note: {additional_desc}.")
reqs = v.get("required", [])
if "properties" in v:
print_object(
output, v["properties"], depth=depth + 1, required_entries=reqs
)
# Strict schema with no extra fields allowed https://github.com/microsoft/CCF/issues/3813
assert (
"allOf" in v or v.get("additionalProperties") == False
), f"AdditionalProperties not set to false in {k}:{v}"
if "additionalProperties" in v:
if isinstance(v["additionalProperties"], dict):
print_object(
output,
v["additionalProperties"]["properties"],
depth=depth + 1,
required_entries=v["additionalProperties"].get("required", []),
)
if "items" in v and v["items"]["type"] == "object":
print_object(
output,
v["items"]["properties"],
depth=depth + 1,
required_entries=reqs,
)
if "allOf" in v:
for e in v["allOf"]:
((k_, cond_),) = e["if"]["properties"].items()
print_object(
output,
e["then"]["properties"],
depth=depth + 1,
required_entries=reqs,
additional_desc=f'Only if ``{k_}`` is ``"{cond_["const"]}"``',
)
elif k == "additionalProperties" and isinstance(v, bool):
# Skip display of additionalProperties if bool as it is used
# to make the schema stricter
pass
else:
print_entry(output, v, name=k, required=k in required_entries, depth=depth)
def generate_configuration_docs(input_file_path, output_file_path):
with open(input_file_path, "r") as in_:
j = json.load(in_)
output = MinimalRstGenerator()
output.add_heading("Configuration Options", START_DEPTH)
print_object(
output, j["properties"], required_entries=j["required"], depth=START_DEPTH
)
    assert (
        j.get("additionalProperties") is False
    ), "AdditionalProperties not set to false in top level schema"
out = output.render()
# Only update output file if the file will be modified
with tempfile.NamedTemporaryFile("w") as temp:
temp.write(out)
temp.flush()
if not os.path.exists(output_file_path) or not filecmp.cmp(
temp.name, output_file_path
):
with open(output_file_path, "w") as out_:
                out_.write(out)
print(f"Configuration file successfully generated at {output_file_path}")
if __name__ == "__main__":
if len(sys.argv) <= 2:
print(f"Usage: {sys.argv[0]} <input_path> <output_path>")
sys.exit(1)
generate_configuration_docs(sys.argv[1], sys.argv[2])
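    # Illustrative invocation (file names are hypothetical):
    #   python generate_config_rst.py config_schema.json config.generated.rst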
|
aad4dc1ede0a4a66e66fdd323e2c1d16a2c0afb6
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/apps/grid_integration/tests/test_backcaster.py
|
0b8368c7feae9577db7da3ee61fe215d1303a915
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 10,743
|
py
|
test_backcaster.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
import pytest
from pyomo.common import unittest as pyo_unittest
from idaes.apps.grid_integration.forecaster import ForecastError, Backcaster
import idaes.logger as idaeslog
@pytest.fixture
def historical_da_prices():
return {"test_bus": [1] * 24 + [2] * 24 + [3] * 24}
@pytest.fixture
def historical_rt_prices():
return {"test_bus": [10] * 24 + [20] * 24 + [30] * 24}
@pytest.fixture
def base_backcaster(historical_da_prices, historical_rt_prices):
return Backcaster(historical_da_prices, historical_rt_prices)
@pytest.mark.unit
def test_create_backcaster(historical_da_prices, historical_rt_prices):
backcaster = Backcaster(historical_da_prices, historical_rt_prices)
assert backcaster.historical_da_prices is historical_da_prices
assert backcaster.historical_rt_prices is historical_rt_prices
@pytest.mark.unit
def test_create_backcaster_with_small_max_historical_days(
caplog, historical_da_prices, historical_rt_prices
):
max_n_days = 1
with caplog.at_level(idaeslog.WARNING):
backcaster = Backcaster(
historical_da_prices, historical_rt_prices, max_historical_days=max_n_days
)
_warn_msg = (
f"The number of days in the input historical prices for bus test_bus is greater than the max value 1."
f" Dropping the data for the first 2 day(s)."
)
assert _warn_msg in caplog.text
expected_historical_da_prices = {"test_bus": [3] * 24}
expected_historical_rt_prices = {"test_bus": [30] * 24}
pyo_unittest.assertStructuredAlmostEqual(
first=backcaster.historical_da_prices, second=expected_historical_da_prices
)
pyo_unittest.assertStructuredAlmostEqual(
first=backcaster.historical_rt_prices, second=expected_historical_rt_prices
)
@pytest.mark.unit
@pytest.mark.parametrize("value", ["10", "ten", [10]])
def test_invalid_max_historical_days_type(
value, historical_da_prices, historical_rt_prices
):
with pytest.raises(TypeError, match=r".*max_historical_days must be a number.*"):
Backcaster(
historical_da_prices, historical_rt_prices, max_historical_days=value
)
@pytest.mark.unit
@pytest.mark.parametrize("value", [-10, 0, -0.0])
def test_invalid_max_historical_days_value(
value, historical_da_prices, historical_rt_prices
):
with pytest.raises(ValueError, match=r".*max_historical_days must be >= 1.*"):
Backcaster(
historical_da_prices, historical_rt_prices, max_historical_days=value
)
@pytest.mark.unit
def test_create_backcaster_with_non_dict(historical_da_prices, historical_rt_prices):
with pytest.raises(
TypeError, match=r"Given historical price is not an dictionary object.*"
):
Backcaster(historical_da_prices, [])
Backcaster(100, historical_rt_prices)
@pytest.mark.unit
def test_create_backcaster_with_empty_dict(historical_da_prices, historical_rt_prices):
with pytest.raises(ValueError, match=r"Given historical price is empty."):
Backcaster({}, historical_rt_prices)
Backcaster(historical_da_prices, {})
@pytest.mark.unit
def test_create_backcaster_with_non_list(historical_da_prices, historical_rt_prices):
with pytest.raises(TypeError, match=r".*bus test_bus is not a list object.*"):
Backcaster({"test_bus": {1, 2, 3}}, historical_rt_prices)
Backcaster(historical_da_prices, {"test_bus": {1, 2, 3}})
@pytest.mark.unit
def test_create_backcaster_with_not_enough_entries(
historical_da_prices, historical_rt_prices
):
with pytest.raises(
ValueError, match=r".*At least a day of the historical prices.*"
):
Backcaster({"test_bus": [1] * 23}, historical_rt_prices)
Backcaster(historical_da_prices, {"test_bus": [1] * 2})
@pytest.mark.unit
def test_create_backcaster_with_non_24_entries(
historical_da_prices, historical_rt_prices
):
with pytest.raises(ValueError, match=r".*should be a multiple of 24.*"):
Backcaster({"test_bus": [1] * 47}, historical_rt_prices)
Backcaster(historical_da_prices, {"test_bus": [1] * 46})
@pytest.mark.unit
def test_forecast_real_time_prices(base_backcaster):
n_samples = 3
horizon = 4
result_forecasts = base_backcaster.forecast_real_time_prices(
date="2022-05-11", hour=18, bus="test_bus", horizon=horizon, n_samples=n_samples
)
expected_forecasts = {
n_samples - i - 1: [(i + 1) * 10] * horizon for i in range(n_samples)
}
pyo_unittest.assertStructuredAlmostEqual(
first=result_forecasts, second=expected_forecasts
)
@pytest.mark.unit
def test_forecast_day_ahead_prices(base_backcaster):
n_samples = 2
horizon = 48
result_forecasts = base_backcaster.forecast_day_ahead_prices(
date="2022-05-11", hour=0, bus="test_bus", horizon=horizon, n_samples=n_samples
)
expected_forecasts = {0: [3] * 24 + [1] * 24, 1: [2] * 24 + [3] * 24}
pyo_unittest.assertStructuredAlmostEqual(
first=result_forecasts, second=expected_forecasts
)
@pytest.mark.unit
def test_forecast_day_ahead_and_real_time_prices(base_backcaster):
(
da_result_forecasts,
rt_forecasts,
) = base_backcaster.forecast_day_ahead_and_real_time_prices(
date="2022-05-11", hour=0, bus="test_bus", horizon=48, n_samples=2
)
expected_da_forecasts = {0: [3] * 24 + [1] * 24, 1: [2] * 24 + [3] * 24}
pyo_unittest.assertStructuredAlmostEqual(
first=da_result_forecasts, second=expected_da_forecasts
)
(
da_forecasts,
rt_result_forecasts,
) = base_backcaster.forecast_day_ahead_and_real_time_prices(
date="2022-05-11", hour=18, bus="test_bus", horizon=4, n_samples=3
)
expected_rt_forecasts = {3 - i - 1: [(i + 1) * 10] * 4 for i in range(3)}
pyo_unittest.assertStructuredAlmostEqual(
first=rt_result_forecasts, second=expected_rt_forecasts
)
@pytest.mark.unit
def test_forecast_nonexistent_bus_prices(base_backcaster):
wrong_bus = "test_bussss"
n_samples = 3
horizon = 4
with pytest.raises(
ForecastError, match=r"No test_bussss real-time price available"
):
base_backcaster.forecast_real_time_prices(
date="2022-05-11",
hour=18,
bus=wrong_bus,
horizon=horizon,
n_samples=n_samples,
)
n_samples = 2
horizon = 48
with pytest.raises(
ForecastError, match=r"No test_bussss day-ahead price available"
):
        base_backcaster.forecast_day_ahead_prices(
date="2022-05-11",
hour=0,
bus=wrong_bus,
horizon=horizon,
n_samples=n_samples,
)
class MockPrescientHourlyStats:
def __init__(self, bus_to_prices_dict) -> None:
self.observed_bus_LMPs = bus_to_prices_dict
@pytest.mark.unit
def test_fetch_hourly_stats_from_prescient(base_backcaster, historical_rt_prices):
prescient_hourly_stats = MockPrescientHourlyStats({"test_bus": 15})
base_backcaster.fetch_hourly_stats_from_prescient(prescient_hourly_stats)
expected_current_day_rt_prices = {}
expected_current_day_rt_prices["test_bus"] = [15]
pyo_unittest.assertStructuredAlmostEqual(
first=expected_current_day_rt_prices["test_bus"],
second=base_backcaster._current_day_rt_prices["test_bus"],
)
for i in range(23):
prescient_hourly_stats.observed_bus_LMPs["test_bus"] = 15
base_backcaster.fetch_hourly_stats_from_prescient(prescient_hourly_stats)
expected_current_day_rt_prices_1 = {}
expected_current_day_rt_prices_1["test_bus"] = []
expected_historical_rt_prices = [10] * 24 + [20] * 24 + [30] * 24 + [15] * 24
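    # After 24 hourly fetches the completed day of 15s is rolled from the
    # current-day buffer into the historical record, emptying the buffer.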
pyo_unittest.assertStructuredAlmostEqual(
first=expected_current_day_rt_prices_1["test_bus"],
second=base_backcaster._current_day_rt_prices["test_bus"],
)
pyo_unittest.assertStructuredAlmostEqual(
first=expected_historical_rt_prices,
second=base_backcaster._historical_rt_prices["test_bus"],
)
@pytest.mark.unit
def test_fetch_hourly_stats_from_prescient_greater_than_max_historical_days(
base_backcaster, historical_rt_prices
):
days = 8
target_lmp = []
for day in range(days):
for t in range(24):
prescient_hourly_stats = MockPrescientHourlyStats({"test_bus": day * 10})
base_backcaster.fetch_hourly_stats_from_prescient(prescient_hourly_stats)
target_lmp.append(day * 10)
expected_historical_rt_prices = [20] * 24 + [30] * 24 + target_lmp
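    # Appending 8 new days pushes history past the cap, so the oldest fixture
    # day ([10]*24) is dropped and only the two most recent fixture days
    # remain ahead of the new data.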
pyo_unittest.assertStructuredAlmostEqual(
first=expected_historical_rt_prices,
second=base_backcaster._historical_rt_prices["test_bus"],
)
class DAPrices:
def __init__(self, da_prices) -> None:
self.day_ahead_prices = da_prices
def get(self, info):
bus, t = info
return self.day_ahead_prices[bus][t]
class MockRucMarket:
def __init__(self, da_prices) -> None:
self.day_ahead_prices = DAPrices(da_prices)
class MockPrescientRucPlan:
def __init__(self, da_prices) -> None:
self.ruc_market = MockRucMarket(da_prices)
@pytest.mark.unit
def test_fetch_day_ahead_stats_from_prescient(base_backcaster, historical_da_prices):
for i in range(base_backcaster.max_historical_days + 1):
da_price = {"test_bus": [i] * 24}
day_ahead_result = MockPrescientRucPlan(da_price)
base_backcaster.fetch_day_ahead_stats_from_prescient(
None, None, day_ahead_result
)
expected_historical_da_prices = {}
expected_historical_da_prices["test_bus"] = []
for i in range(1, base_backcaster.max_historical_days + 1):
expected_historical_da_prices["test_bus"] += [i] * 24
pyo_unittest.assertStructuredAlmostEqual(
first=expected_historical_da_prices,
second=base_backcaster._historical_da_prices,
)
|
a87a2cdbe32ab51b29c20ee18a186003ef629b8f
|
c3542b98289c1ba85f62d08b5edbe1a3c18f3c80
|
/snake.py
|
3c66cc599d4b995d7564f1b1ed1da8749957bbd9
|
[
"LicenseRef-scancode-unknown",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
geekcomputers/Python
|
16674289843f89f6cc287097f033b928f4181d84
|
bc55e2a2c5a98f4c7597e901a04457dfb9d5df0c
|
refs/heads/master
| 2023-08-18T21:04:18.163283
| 2023-08-17T17:38:16
| 2023-08-17T17:38:16
| 2,881,789
| 32,418
| 15,024
|
MIT
| 2023-09-02T18:40:33
| 2011-11-30T09:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,929
|
py
|
snake.py
|
# SNAKES GAME
# Use ARROW KEYS to play, SPACE BAR for pausing/resuming and Esc Key for exiting
# Original Author : Sanchit Gangwar
# Modified by : Rayan Dutta
# Minor changes made to keep the game working.
try:
import curses
from time import sleep
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
from random import randint
print(
"Use the arrow keys to move, press the space bar to pause, and press ESC to quit"
)
sleep(1)
key = KEY_RIGHT # Initializing values
curses.initscr()
win = curses.newwin(20, 60, 0, 0)
win.keypad(1)
curses.noecho()
curses.curs_set(0)
win.border(0)
win.nodelay(1)
x, y = win.getmaxyx()
key = KEY_DOWN # Initializing values
score = 0
    try:
        with open(".snake_highscore.txt", "r") as s:
            hscore = int(s.read())
    except (FileNotFoundError, ValueError):  # first run or corrupt file
        hscore = 0
snake = [[4, 10], [4, 9], [4, 8]] # Initial snake co-ordinates
food = [10, 20] # First food co-ordinates
win.addch(food[0], food[1], "*") # Prints or shows the food
while key != 27: # While Esc key is not pressed
win.border(0)
win.addstr(0, 2, "Score : " + str(score) + " ") # Printing 'Score' and
win.addstr(0, 27, " SNAKE ") # 'SNAKE' strings
win.addstr(0, 37, "Highscore: " + str(hscore) + " ")
win.timeout(
int(150 - (len(snake) / 5 + len(snake) / 10) % 120)
) # Increases the speed of Snake as its length increases
prevKey = key # Previous key pressed
event = win.getch()
key = key if event == -1 else event
if key == ord(" "): # If SPACE BAR is pressed, wait for another
key = -1 # one (Pause/Resume)
win.addstr(0, 40, "PAUSED")
while key != ord(" "):
key = win.getch()
key = prevKey
continue
if key not in [
KEY_LEFT,
KEY_RIGHT,
KEY_UP,
KEY_DOWN,
27,
]: # If an invalid key is pressed
key = prevKey
# Calculates the new coordinates of the head of the snake. NOTE: len(snake) increases.
# This is taken care of later at [1].
snake.insert(
0,
[
snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1),
snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1),
],
)
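        # Note: `(cond and 1)` evaluates to 1 when cond is True and to False
        # (numerically 0) otherwise, so each term moves the head by one cell.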
# If snake crosses the boundaries, make it enter from the other side
if snake[0][0] == 0:
snake[0][0] = 18
if snake[0][1] == 0:
snake[0][1] = 58
if snake[0][0] == 19:
snake[0][0] = 1
if snake[0][1] == 59:
snake[0][1] = 1
# Exit if snake crosses the boundaries (Uncomment to enable)
# if snake[0][0] == 0 or snake[0][0] == 19 or snake[0][1] == 0 or snake[0][1] == 59: break
# If snake runs over itself
if snake[0] in snake[1:]:
break
if snake[0] == food: # When snake eats the food
food = []
score += 1
while food == []:
food = [
randint(1, 18),
randint(1, 58),
] # Calculating next food's coordinates
if food in snake:
food = []
win.addch(food[0], food[1], "*")
else:
last = snake.pop() # [1] If it does not eat the food, length decreases
win.addch(last[0], last[1], " ")
win.addch(snake[0][0], snake[0][1], "#")
except (KeyboardInterrupt, EOFError):  # "X or Y" in an except clause only catches X
curses.endwin()
print("Score - " + str(score))
if score > int(hscore):
s = open(".snake_highscore.txt", "w")
s.write(str(score))
s.close()
curses.endwin()
if score > int(hscore):
s = open(".snake_highscore.txt", "w")
s.write(str(score))
s.close()
print("Score - " + str(score))
|
694ca6d5ce07e3a4254d37eeba1640f1e015613a
|
a2ca2b43bbc24b5479071033b4ba66442b466c2b
|
/.local/bin/set-theme
|
1080af9e430b9dfc5e148b1163b7a980d3463bd5
|
[
"MIT"
] |
permissive
|
nickjj/dotfiles
|
12e20c628963a5be02ffe19871bee6130cfb2538
|
724a4ced674d2bb40f8d0bf1817c46761d1a3100
|
refs/heads/master
| 2023-08-30T06:26:30.000292
| 2023-07-29T21:52:34
| 2023-07-29T21:53:30
| 162,295,765
| 940
| 279
|
MIT
| 2022-05-06T21:56:46
| 2018-12-18T13:53:57
|
Vim script
|
UTF-8
|
Python
| false
| false
| 5,054
|
set-theme
|
#!/usr/bin/env python3
import argparse
import fileinput
import os
import re
import sys
import textwrap
from pathlib import Path
from subprocess import run, PIPE
THEMES = {
"gruvbox": {
"dark": {
"terminal": {
"colorScheme": "Gruvbox Dark",
"cursorColor": "#ffb261",
},
"tmux": {
"status-style": "fg=colour244",
"window-status-current-style": "fg=colour222",
"pane-border-style": "fg=colour240",
"pane-active-border-style": "fg=colour243"
}
},
"light": {
"terminal": {
"colorScheme": "Gruvbox Light",
"cursorColor": "#ffb261",
},
"tmux": {
"status-style": "fg=colour179",
"window-status-current-style": "fg=colour172",
"pane-border-style": "fg=colour186",
"pane-active-border-style": "fg=colour178"
}
}
},
"one": {
"dark": {
"terminal": {
"colorScheme": "One Half Dark",
"cursorColor": "#6de6f5",
},
"tmux": {
"status-style": "fg=colour110",
"window-status-current-style": "fg=colour39",
"pane-border-style": "fg=colour240",
"pane-active-border-style": "fg=colour243"
}
},
"light": {
"terminal": {
"colorScheme": "One Half Light",
"cursorColor": "#6de6f5",
},
"tmux": {
"status-style": "fg=colour110",
"window-status-current-style": "fg=colour39",
"pane-border-style": "fg=colour253",
"pane-active-border-style": "fg=colour250"
}
}
}
}
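# THEMES maps theme name -> background ("dark"/"light") -> application
# ("terminal"/"tmux") -> the option overrides the change_* functions apply.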
def get_windows_user():
result = run(['powershell.exe', '$env:UserName'],
stdout=PIPE,
universal_newlines=True)
return result.stdout.rstrip()
HOME = str(Path.home())
TERMINAL_CONFIG = f'/c/Users/{get_windows_user()}/AppData/Local/Packages/Microsoft.WindowsTerminal_8wekyb3d8bbwe/LocalState/settings.json' # noqa: E501
SHELL_CONFIG = f'{os.environ.get("XDG_CONFIG_HOME")}/zsh/.zshrc'
TMUX_CONFIG = f'{HOME}/.tmux.conf'
VIM_CONFIG = f'{HOME}/.vimrc'
def edit_inplace(file, preserve_symlinks=True):
if preserve_symlinks:
file = os.path.realpath(file)
return fileinput.input(files=(file,), inplace=True)
def active_theme_and_background():
with open(VIM_CONFIG, 'r') as f:
for line in f:
match = re.match('^set background=(.*)$', line)
if match:
bg = match.group(1)
continue
match = re.match('^colorscheme (.*$)$', line)
if match:
theme = match.group(1)
continue
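    # `theme` and `bg` are only bound if the regexes matched, so a NameError
    # here means the vimrc is missing one of the two statements.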
try:
return theme, bg
except NameError:
sys.exit('error: "set background" or "colorscheme" statement not found'
f' in {VIM_CONFIG}')
def change_terminal_theme(theme, bg):
terminal_options = THEMES[theme][bg]['terminal']
with edit_inplace(TERMINAL_CONFIG) as f:
for line in f:
for key, value in terminal_options.items():
line = re.sub(rf'"{key}": ".*"', f'"{key}": "{value}"', line)
sys.stdout.write(line)
def change_tmux_theme(theme, bg):
tmux_options = THEMES[theme][bg]['tmux']
with edit_inplace(TMUX_CONFIG) as f:
for line in f:
for key, value in tmux_options.items():
line = re.sub(rf'^set -g {key} .*$', f'set -g {key} {value}',
line)
sys.stdout.write(line)
run(['tmux', 'source-file', TMUX_CONFIG])
def change_vim_theme(theme, bg):
with edit_inplace(VIM_CONFIG) as f:
for line in f:
line = re.sub(r'^colorscheme .*$', f'colorscheme {theme}', line)
line = re.sub(r'^set background=.*$', f'set background={bg}', line)
sys.stdout.write(line)
def change_fzf_theme(bg):
with edit_inplace(SHELL_CONFIG) as f:
for line in f:
            line = re.sub(r'FZF_DEFAULT_OPTS="--color=.*"$',
                          f'FZF_DEFAULT_OPTS="--color={bg}"', line)
sys.stdout.write(line)
def parseargs():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Set a theme along with optionally toggling dark and light backgrounds.
'''))
parser.add_argument('theme', choices=THEMES, nargs='?',
help='the theme name')
parser.add_argument('--toggle-bg', action='store_true',
help='toggle the background between dark and light')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.error('at least one argument is required')
return args
def main():
args = parseargs()
theme, bg = active_theme_and_background()
if args.theme:
theme = args.theme
if args.toggle_bg:
bg = 'light' if bg == 'dark' else 'dark'
change_terminal_theme(theme, bg)
change_tmux_theme(theme, bg)
change_vim_theme(theme, bg)
change_fzf_theme(bg)
if __name__ == '__main__':
main()
|
|
3d65b4c44a5a4fa569dd08d1fefee4173be8d9cf
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/great_expectations/datasource/batch_kwargs_generator/subdir_reader_batch_kwargs_generator.py
|
7e8a620278b20885394537e50d8af576ede8d246
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 11,415
|
py
|
subdir_reader_batch_kwargs_generator.py
|
import logging
import os
from typing import Dict, Iterable
from great_expectations.datasource.batch_kwargs_generator.batch_kwargs_generator import (
BatchKwargsGenerator,
)
from great_expectations.datasource.types import PathBatchKwargs
from great_expectations.exceptions import BatchKwargsError
logger = logging.getLogger(__name__)
KNOWN_EXTENSIONS = [
".csv",
".tsv",
".parquet",
".parq",
".pqt",
".xls",
".xlsx",
".json",
".csv.gz",
".tsv.gz",
".feather",
".pkl",
]
# 20230830 - Chetan - Is open for deletion but is used in a number of tests; once decoupled, we should remove this
class SubdirReaderBatchKwargsGenerator(BatchKwargsGenerator):
"""The SubdirReaderBatchKwargsGenerator inspects a filesystem and produces path-based batch_kwargs.
SubdirReaderBatchKwargsGenerator recognizes data assets using two criteria:
    - for files directly in 'base_directory' with recognized extensions (.csv, .tsv, .parquet, .xls, .xlsx, .json,
    .csv.gz, .tsv.gz, .feather, .pkl), it uses the name of the file without the extension
    - for other files or directories in 'base_directory', it uses the file or directory name
SubdirReaderBatchKwargsGenerator sees all files inside a directory of base_directory as batches of one datasource.
SubdirReaderBatchKwargsGenerator can also include configured reader_options which will be added to batch_kwargs generated
by this generator.
"""
_default_reader_options: Dict = {}
recognized_batch_parameters = {"data_asset_name", "partition_id"}
def __init__( # noqa: PLR0913
self,
name="default",
datasource=None,
base_directory="/data",
reader_options=None,
known_extensions=None,
reader_method=None,
) -> None:
super().__init__(name, datasource=datasource)
if reader_options is None:
reader_options = self._default_reader_options
if known_extensions is None:
known_extensions = KNOWN_EXTENSIONS
self._known_extensions = known_extensions
self._reader_options = reader_options
self._reader_method = reader_method
self._base_directory = base_directory
@property
def reader_options(self):
return self._reader_options
@property
def known_extensions(self):
return self._known_extensions
@property
def reader_method(self):
return self._reader_method
@property
def base_directory(self):
# If base directory is a relative path, interpret it as relative to the data context's
# context root directory (parent directory of great_expectation dir)
if (
os.path.isabs(self._base_directory) # noqa: PTH117
or self._datasource.data_context is None
):
return self._base_directory
else:
return os.path.join( # noqa: PTH118
self._datasource.data_context.root_directory, self._base_directory
)
def get_available_data_asset_names(self):
if not os.path.isdir(self.base_directory): # noqa: PTH112
return {"names": [], "is_complete_list": True}
known_assets = self._get_valid_file_options(base_directory=self.base_directory)
return {"names": known_assets, "is_complete_list": True}
def get_available_partition_ids(self, data_asset_name=None):
# If the asset names a single known *file*, return ONLY that
for extension in self.known_extensions:
if os.path.isfile( # noqa: PTH113
os.path.join( # noqa: PTH118
self.base_directory, data_asset_name + extension
)
):
return [data_asset_name]
if os.path.isfile( # noqa: PTH113
os.path.join(self.base_directory, data_asset_name) # noqa: PTH118
):
return [data_asset_name]
# Otherwise, subdir files are partition ids
return [
path
for (path, type) in self._get_valid_file_options(
base_directory=os.path.join( # noqa: PTH118
self.base_directory, data_asset_name
)
)
]
def _build_batch_kwargs(self, batch_parameters):
"""
Args:
batch_parameters:
Returns:
batch_kwargs
"""
try:
data_asset_name = batch_parameters.pop("data_asset_name")
except KeyError:
raise BatchKwargsError(
"Unable to build BatchKwargs: no name provided in batch_parameters.",
batch_kwargs=batch_parameters,
)
if "partition_id" in batch_parameters:
partition_id = batch_parameters.pop("partition_id")
# Find the path
path = None
for extension in self.known_extensions:
if os.path.isfile( # noqa: PTH113
os.path.join( # noqa: PTH118
self.base_directory, data_asset_name, partition_id + extension
)
):
path = os.path.join( # noqa: PTH118
self.base_directory, data_asset_name, partition_id + extension
)
if path is None:
logger.warning(
"Unable to find path with the provided partition; searching for asset-name partitions."
)
# Fall through to this case in the event that there is not a subdir available, or if partition_id was
# not provided
if os.path.isfile( # noqa: PTH113
os.path.join(self.base_directory, data_asset_name) # noqa: PTH118
):
path = os.path.join( # noqa: PTH118
self.base_directory, data_asset_name
)
for extension in self.known_extensions:
if os.path.isfile( # noqa: PTH113
os.path.join( # noqa: PTH118
self.base_directory, data_asset_name + extension
)
):
path = os.path.join( # noqa: PTH118
self.base_directory, data_asset_name + extension
)
if path is None:
raise BatchKwargsError(
f"Unable to build batch kwargs from for asset '{data_asset_name}'",
batch_parameters,
)
return self._build_batch_kwargs_from_path(path, **batch_parameters)
else:
return self.yield_batch_kwargs(
data_asset_name=data_asset_name, **batch_parameters
)
def _get_valid_file_options(self, base_directory=None):
valid_options = []
if base_directory is None:
base_directory = self.base_directory
file_options = os.listdir(base_directory)
for file_option in file_options:
for extension in self.known_extensions:
if (
file_option.endswith(extension)
and not file_option.startswith(".")
and (file_option[: -len(extension)], "file") not in valid_options
):
valid_options.append((file_option[: -len(extension)], "file"))
elif os.path.isdir( # noqa: PTH112
os.path.join(self.base_directory, file_option) # noqa: PTH118
):
# Make sure there's at least one valid file inside the subdir
subdir_options = self._get_valid_file_options(
base_directory=os.path.join( # noqa: PTH118
base_directory, file_option
)
)
if (
len(subdir_options) > 0
and (file_option, "directory") not in valid_options
):
valid_options.append((file_option, "directory"))
return valid_options
def _get_iterator(self, data_asset_name, reader_options=None, limit=None):
logger.debug(
f"Beginning SubdirReaderBatchKwargsGenerator _get_iterator for data_asset_name: {data_asset_name}"
)
# If the data asset is a file, then return the path.
# Otherwise, use files in a subdir as batches
if os.path.isdir( # noqa: PTH112
os.path.join(self.base_directory, data_asset_name) # noqa: PTH118
):
subdir_options = os.listdir(
os.path.join(self.base_directory, data_asset_name) # noqa: PTH118
)
batches = []
for file_option in subdir_options:
for extension in self.known_extensions:
if file_option.endswith(extension) and not file_option.startswith(
"."
):
batches.append(
os.path.join( # noqa: PTH118
self.base_directory, data_asset_name, file_option
)
)
return self._build_batch_kwargs_path_iter(
batches, reader_options=reader_options, limit=limit
)
else:
for extension in self.known_extensions:
path = os.path.join( # noqa: PTH118
self.base_directory, data_asset_name + extension
)
if os.path.isfile(path): # noqa: PTH113
return iter(
[
self._build_batch_kwargs_from_path(
path, reader_options=reader_options, limit=limit
)
]
)
# If we haven't returned yet, raise
raise BatchKwargsError(
"No valid files found when searching {:s} using configured known_extensions: "
"{:s} ".format(
os.path.join(self.base_directory, data_asset_name), # noqa: PTH118
", ".join(map(str, self.known_extensions)),
),
batch_kwargs=PathBatchKwargs(
path=os.path.join( # noqa: PTH118
self.base_directory, data_asset_name
)
),
)
def _build_batch_kwargs_path_iter(
self, path_list, reader_options=None, limit=None
) -> Iterable[PathBatchKwargs]:
for path in path_list:
yield self._build_batch_kwargs_from_path(
path, reader_options=reader_options, limit=limit
)
def _build_batch_kwargs_from_path(
self, path, reader_method=None, reader_options=None, limit=None
):
batch_kwargs = self._datasource.process_batch_parameters(
reader_method=reader_method or self.reader_method,
reader_options=reader_options or self.reader_options,
limit=limit,
)
batch_kwargs["path"] = path
batch_kwargs["datasource"] = self._datasource.name
return PathBatchKwargs(batch_kwargs)
|
550fdd22eeedaac2d1006e47ad91581bceba71ef
|
9dd2bc9409bcdd7749cf0bad79092cd204200eb5
|
/polygon/rest/reference.py
|
38d15752d7285ac660030ba547c3c8eb1d3fbcc0
|
[
"MIT"
] |
permissive
|
polygon-io/client-python
|
b36ccdd380fdf4b9ec344c3e9d43eaab0ce313cc
|
195d3a2894b979c4ad86c6bd170b674e09c30d9d
|
refs/heads/master
| 2023-08-29T12:04:36.823546
| 2023-08-28T16:19:25
| 2023-08-28T16:19:25
| 216,660,192
| 574
| 189
|
MIT
| 2023-09-11T19:50:33
| 2019-10-21T20:33:17
|
Python
|
UTF-8
|
Python
| false
| false
| 23,755
|
py
|
reference.py
|
from .base import BaseClient
from typing import Optional, Any, Dict, List, Union, Iterator
from .models import (
MarketHoliday,
MarketStatus,
Ticker,
TickerChangeResults,
TickerDetails,
TickerNews,
TickerTypes,
Sort,
Order,
AssetClass,
Locale,
Split,
Dividend,
DividendType,
Frequency,
Condition,
DataType,
SIP,
Exchange,
OptionsContract,
)
from urllib3 import HTTPResponse
from datetime import date
from .models.request import RequestOptionBuilder
class MarketsClient(BaseClient):
def get_market_holidays(
self, params: Optional[Dict[str, Any]] = None, raw: bool = False
) -> Union[List[MarketHoliday], HTTPResponse]:
"""
Get upcoming market holidays and their open/close times.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: List of market holidays.
"""
url = "/v1/marketstatus/upcoming"
return self._get(
path=url,
params=params,
deserializer=MarketHoliday.from_dict,
raw=raw,
result_key="",
)
def get_market_status(
self, params: Optional[Dict[str, Any]] = None, raw: bool = False
) -> Union[MarketStatus, HTTPResponse]:
"""
Get the current trading status of the exchanges and overall financial markets.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: Market status.
"""
url = "/v1/marketstatus/now"
return self._get(
path=url, params=params, deserializer=MarketStatus.from_dict, raw=raw
)
class TickersClient(BaseClient):
def list_tickers(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
type: Optional[str] = None,
market: Optional[str] = None,
exchange: Optional[str] = None,
cusip: Optional[int] = None,
cik: Optional[int] = None,
date: Optional[str] = None,
active: Optional[bool] = None,
search: Optional[str] = None,
limit: Optional[int] = 10,
sort: Optional[Union[str, Sort]] = "ticker",
order: Optional[Union[str, Order]] = "asc",
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[Ticker], HTTPResponse]:
"""
Query all ticker symbols which are supported by Polygon.io. This API currently includes Stocks/Equities, Indices, Forex, and Crypto.
:param ticker: Specify a ticker symbol. Defaults to empty string which queries all tickers.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param type: Specify the type of the tickers. Find the types that we support via our Ticker Types API. Defaults to empty string which queries all types.
:param market: Filter by market type. By default all markets are included.
        :param exchange: Specify the asset's primary exchange Market Identifier Code (MIC) according to ISO 10383. Defaults to empty string which queries all exchanges.
:param cusip: Specify the CUSIP code of the asset you want to search for. Find more information about CUSIP codes at their website. Defaults to empty string which queries all CUSIPs.
:param cik: Specify the CIK of the asset you want to search for. Find more information about CIK codes at their website. Defaults to empty string which queries all CIKs.
:param date: Specify a point in time to retrieve tickers available on that date. Defaults to the most recent available date.
:param search: Search for terms within the ticker and/or company name.
:param active: Specify if the tickers returned should be actively traded on the queried date. Default is true.
:param limit: Limit the size of the response per-page, default is 100 and max is 1000.
:param sort: The field to sort the results on. Default is ticker. If the search query parameter is present, sort is ignored and results are ordered by relevance.
:param order: The order to sort the results on. Default is asc (ascending).
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of tickers.
"""
url = "/v3/reference/tickers"
return self._paginate(
path=url,
params=self._get_params(self.list_tickers, locals()),
raw=raw,
deserializer=Ticker.from_dict,
options=options,
)
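    # Sketch (assumes the package's top-level RESTClient and a valid API key):
    #   for t in client.list_tickers(market="stocks", limit=1000):
    #       print(t.ticker)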
def get_ticker_details(
self,
ticker: Optional[str] = None,
date: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[TickerDetails, HTTPResponse]:
"""
Get a single ticker supported by Polygon.io. This response will have detailed information about the ticker and the company behind it.
:param ticker: The ticker symbol of the asset.
:param date: Specify a point in time to get information about the ticker available on that date. When retrieving information from SEC filings, we compare this date with the period of report date on the SEC filing.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: Ticker Details V3
"""
url = f"/v3/reference/tickers/{ticker}"
return self._get(
path=url,
params=self._get_params(self.get_ticker_details, locals()),
deserializer=TickerDetails.from_dict,
raw=raw,
result_key="results",
options=options,
)
def get_ticker_events(
self,
ticker: str,
types: Optional[str] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[TickerChangeResults, HTTPResponse]:
"""
        Get the event history of a ticker at a given point in time.
:param ticker: The ticker symbol of the asset.
:param params: Additional query parameters
:param raw: Return raw object instead of results object.
:return: Ticker Event VX
"""
url = f"/vX/reference/tickers/{ticker}/events"
return self._get(
path=url,
params=self._get_params(self.get_ticker_events, locals()),
deserializer=TickerChangeResults.from_dict,
result_key="results",
raw=raw,
options=options,
)
def list_ticker_news(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
published_utc: Optional[str] = None,
published_utc_lt: Optional[str] = None,
published_utc_lte: Optional[str] = None,
published_utc_gt: Optional[str] = None,
published_utc_gte: Optional[str] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[TickerNews], HTTPResponse]:
"""
Get the most recent news articles relating to a stock ticker symbol, including a summary of the article and a link to the original source.
:param ticker: Return results that contain this ticker.
:param published_utc: Return results published on, before, or after this date.
:param limit: Limit the number of results returned per-page, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: Ticker News.
"""
url = "/v2/reference/news"
return self._paginate(
path=url,
params=self._get_params(self.list_ticker_news, locals()),
raw=raw,
deserializer=TickerNews.from_dict,
options=options,
)
def get_ticker_types(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
locale: Optional[Union[str, Locale]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[List[TickerTypes], HTTPResponse]:
"""
List all ticker types that Polygon.io has.
:param asset_class: Filter by asset class.
:param locale: Filter by locale.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: Ticker Types.
"""
url = "/v3/reference/tickers/types"
return self._get(
path=url,
params=self._get_params(self.get_ticker_types, locals()),
deserializer=TickerTypes.from_dict,
raw=raw,
result_key="results",
options=options,
)
class SplitsClient(BaseClient):
def list_splits(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
execution_date: Optional[Union[str, date]] = None,
execution_date_lt: Optional[Union[str, date]] = None,
execution_date_lte: Optional[Union[str, date]] = None,
execution_date_gt: Optional[Union[str, date]] = None,
execution_date_gte: Optional[Union[str, date]] = None,
reverse_split: Optional[bool] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[Split], HTTPResponse]:
"""
Get a list of historical stock splits, including the ticker symbol, the execution date, and the factors of the split ratio.
:param ticker: Return the stock splits that contain this ticker.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param execution_date: Query by execution date with the format YYYY-MM-DD.
:param execution_date_lt: Execution date less than.
:param execution_date_lte: Execution date less than or equal to.
:param execution_date_gt: Execution date greater than.
:param execution_date_gte: Execution date greater than or equal to.
:param reverse_split: Query for reverse stock splits. A split ratio where split_from is greater than split_to represents a reverse split. By default this filter is not used.
:param limit: Limit the number of results returned per-page, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of splits.
"""
url = "/v3/reference/splits"
return self._paginate(
path=url,
params=self._get_params(self.list_splits, locals()),
raw=raw,
deserializer=Split.from_dict,
options=options,
)
class DividendsClient(BaseClient):
def list_dividends(
self,
ticker: Optional[str] = None,
ticker_lt: Optional[str] = None,
ticker_lte: Optional[str] = None,
ticker_gt: Optional[str] = None,
ticker_gte: Optional[str] = None,
ex_dividend_date: Optional[Union[str, date]] = None,
ex_dividend_date_lt: Optional[Union[str, date]] = None,
ex_dividend_date_lte: Optional[Union[str, date]] = None,
ex_dividend_date_gt: Optional[Union[str, date]] = None,
ex_dividend_date_gte: Optional[Union[str, date]] = None,
record_date: Optional[Union[str, date]] = None,
record_date_lt: Optional[Union[str, date]] = None,
record_date_lte: Optional[Union[str, date]] = None,
record_date_gt: Optional[Union[str, date]] = None,
record_date_gte: Optional[Union[str, date]] = None,
declaration_date: Optional[Union[str, date]] = None,
declaration_date_lt: Optional[Union[str, date]] = None,
declaration_date_lte: Optional[Union[str, date]] = None,
declaration_date_gt: Optional[Union[str, date]] = None,
declaration_date_gte: Optional[Union[str, date]] = None,
pay_date: Optional[Union[str, date]] = None,
pay_date_lt: Optional[Union[str, date]] = None,
pay_date_lte: Optional[Union[str, date]] = None,
pay_date_gt: Optional[Union[str, date]] = None,
pay_date_gte: Optional[Union[str, date]] = None,
frequency: Optional[Union[int, Frequency]] = None,
cash_amount: Optional[float] = None,
cash_amount_lt: Optional[float] = None,
cash_amount_lte: Optional[float] = None,
cash_amount_gt: Optional[float] = None,
cash_amount_gte: Optional[float] = None,
dividend_type: Optional[Union[str, DividendType]] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[Dividend], HTTPResponse]:
"""
Get a list of historical cash dividends, including the ticker symbol, declaration date, ex-dividend date, record date, pay date, frequency, and amount.
:param ticker: Return the dividends that contain this ticker.
:param ticker_lt: Ticker less than.
:param ticker_lte: Ticker less than or equal to.
:param ticker_gt: Ticker greater than.
:param ticker_gte: Ticker greater than or equal to.
:param ex_dividend_date: Query by ex-dividend date with the format YYYY-MM-DD.
:param ex_dividend_date_lt: Ex-dividend date less than.
:param ex_dividend_date_lte: Ex-dividend date less than or equal to.
:param ex_dividend_date_gt: Ex-dividend date greater than.
:param ex_dividend_date_gte: Ex-dividend date greater than or equal to.
:param record_date: Query by record date with the format YYYY-MM-DD.
:param record_date_lt: Record date less than.
:param record_date_lte: Record date less than or equal to.
:param record_date_gt: Record date greater than.
:param record_date_gte: Record date greater than or equal to.
:param declaration_date: Query by declaration date with the format YYYY-MM-DD.
:param declaration_date_lt: Declaration date less than.
:param declaration_date_lte: Declaration date less than or equal to.
:param declaration_date_gt: Declaration date greater than.
:param declaration_date_gte: Declaration date greater than or equal to.
:param pay_date: Query by pay date with the format YYYY-MM-DD.
:param pay_date_lt: Pay date less than.
:param pay_date_lte: Pay date less than or equal to.
:param pay_date_gt: Pay date greater than.
:param pay_date_gte: Pay date greater than or equal to.
:param frequency: Query by the number of times per year the dividend is paid out. Possible values are 0 (one-time), 1 (annually), 2 (bi-annually), 4 (quarterly), and 12 (monthly).
:param cash_amount: Query by the cash amount of the dividend.
:param dividend_type: Query by the type of dividend. Dividends that have been paid and/or are expected to be paid on consistent schedules are denoted as CD. Special Cash dividends that have been paid that are infrequent or unusual, and/or can not be expected to occur in the future are denoted as SC.
:param limit: Limit the number of results returned per-page, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of dividends.
"""
url = "/v3/reference/dividends"
return self._paginate(
path=url,
params=self._get_params(self.list_dividends, locals()),
raw=raw,
deserializer=Dividend.from_dict,
options=options,
)
class ConditionsClient(BaseClient):
def list_conditions(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
data_type: Optional[Union[str, DataType]] = None,
id: Optional[int] = None,
sip: Optional[Union[str, SIP]] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[Condition], HTTPResponse]:
"""
List all conditions that Polygon.io uses.
:param asset_class: Filter for conditions within a given asset class.
:param data_type: Data types that this condition applies to.
:param id: Filter for conditions with a given ID.
:param sip: Filter by SIP. If the condition contains a mapping for that SIP, the condition will be returned.
:param limit: Limit the number of results returned per-page, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object.
:return: List of conditions.
"""
url = "/v3/reference/conditions"
return self._paginate(
path=url,
params=self._get_params(self.list_conditions, locals()),
raw=raw,
deserializer=Condition.from_dict,
options=options,
)
class ExchangesClient(BaseClient):
def get_exchanges(
self,
asset_class: Optional[Union[str, AssetClass]] = None,
locale: Optional[Union[str, Locale]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[List[Exchange], HTTPResponse]:
"""
List all exchanges that Polygon.io knows about.
:param asset_class: Filter by asset class.
:param locale: Filter by locale.
:param params: Any additional query params.
:param raw: Return HTTPResponse object instead of results object.
:return: List of exchanges.
"""
url = "/v3/reference/exchanges"
return self._get(
path=url,
params=self._get_params(self.get_exchanges, locals()),
deserializer=Exchange.from_dict,
raw=raw,
result_key="results",
options=options,
)
class ContractsClient(BaseClient):
def get_options_contract(
self,
ticker: str,
as_of: Optional[Union[str, date]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[OptionsContract, HTTPResponse]:
"""
        Get a single options contract.
        :param ticker: The ticker symbol of the options contract.
        :param as_of: Specify a point in time for the contract as of this date with format YYYY-MM-DD.
        :param params: Any additional query params.
        :param raw: Return raw object instead of results object.
        :return: Options contract.
"""
url = f"/v3/reference/options/contracts/{ticker}"
return self._get(
path=url,
params=self._get_params(self.get_options_contract, locals()),
result_key="results",
deserializer=OptionsContract.from_dict,
raw=raw,
options=options,
)
def list_options_contracts(
self,
underlying_ticker: Optional[str] = None,
underlying_ticker_lt: Optional[str] = None,
underlying_ticker_lte: Optional[str] = None,
underlying_ticker_gt: Optional[str] = None,
underlying_ticker_gte: Optional[str] = None,
contract_type: Optional[str] = None,
expiration_date: Optional[Union[str, date]] = None,
expiration_date_lt: Optional[Union[str, date]] = None,
expiration_date_lte: Optional[Union[str, date]] = None,
expiration_date_gt: Optional[Union[str, date]] = None,
expiration_date_gte: Optional[Union[str, date]] = None,
as_of: Optional[Union[str, date]] = None,
strike_price: Optional[float] = None,
strike_price_lt: Optional[float] = None,
strike_price_lte: Optional[float] = None,
strike_price_gt: Optional[float] = None,
strike_price_gte: Optional[float] = None,
expired: Optional[bool] = None,
limit: Optional[int] = None,
sort: Optional[Union[str, Sort]] = None,
order: Optional[Union[str, Order]] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
options: Optional[RequestOptionBuilder] = None,
) -> Union[Iterator[OptionsContract], HTTPResponse]:
"""
List historical options contracts.
:param underlying_ticker: Query for contracts relating to an underlying stock ticker.
:param contract_type: Query by the type of contract.
:param expiration_date: Query by contract expiration with date format YYYY-MM-DD.
:param as_of: Specify a point in time for contracts as of this date with format YYYY-MM-DD.
:param strike_price: Query by strike price of a contract.
:param expired: Query for expired contracts.
:param limit: Limit the number of results returned per-page, default is 10 and max is 1000.
:param sort: Sort field used for ordering.
:param order: Order results based on the sort field.
:param params: Any additional query params.
:param raw: Return raw object instead of results object
:return: List of options contracts.
"""
url = "/v3/reference/options/contracts"
return self._paginate(
path=url,
params=self._get_params(self.list_options_contracts, locals()),
raw=raw,
deserializer=OptionsContract.from_dict,
options=options,
)
|
02f05b3eb0373a74093d1a07db8458c762cf0b78
|
5141195cd54aa2dcb7089e110cbe836d1308a01d
|
/tlm_convert_profile.py
|
9ce89d53ce3bc663d3fa1fc0dfdabb881d9499fb
|
[
"BSD-3-Clause"
] |
permissive
|
amaxwell/tlutility
|
5bafaf390d78c25fc334680f6780b0873e7ce369
|
7e288057b25316363e1b3beb2d0e453976b2ee22
|
refs/heads/master
| 2023-07-05T16:14:03.899143
| 2023-04-17T04:57:27
| 2023-04-17T04:57:27
| 22,581,590
| 301
| 27
|
BSD-3-Clause
| 2021-09-15T14:04:26
| 2014-08-03T19:07:29
|
Objective-C
|
UTF-8
|
Python
| false
| false
| 3,953
|
py
|
tlm_convert_profile.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software is Copyright (c) 2009-2016
# Adam Maxwell. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# - Neither the name of Adam Maxwell nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from Foundation import *
options = []
variables = []
docs = []
lang = []
collections = []
other = []
TEXLIVE_YEAR = "2021"
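# Each non-comment texlive.profile line is "key value...", e.g.
#   "selected_scheme scheme-full" or "TEXDIR /usr/local/texlive/2021";
# the loop below takes the first token as the key and joins the rest as the value.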
for line in open("/usr/local/texlive/%s/tlpkg/texlive.profile" % (TEXLIVE_YEAR), "r"):
if line.startswith("#"):
continue
keyValues = line.strip().split(" ")
key = keyValues[0]
value = " ".join(keyValues[1:])
profileDictionary = {}
profileDictionary["key"] = key
if len(value) == 1 and value.isdigit():
profileDictionary["value"] = (value and True or False)
else:
profileDictionary["value"] = value
# default human-readable string
profileDictionary["name"] = key
if key.startswith("TEX"):
profileDictionary["name"] = key
profileDictionary["type"] = "variable"
variables.append(profileDictionary)
# do longest match first on keys with a common prefix
elif key.startswith("collection-documentation-"):
profileDictionary["name"] = key[len("collection-documentation-"):].capitalize()
profileDictionary["type"] = "documentation"
docs.append(profileDictionary)
elif key.startswith("collection-lang"):
profileDictionary["name"] = key[len("collection-lang"):].capitalize()
profileDictionary["type"] = "language"
lang.append(profileDictionary)
elif key.startswith("collection-"):
profileDictionary["name"] = key[len("collection-"):]
profileDictionary["type"] = "collection"
collections.append(profileDictionary)
elif key.startswith("option"):
profileDictionary["type"] = "option"
options.append(profileDictionary)
else:
profileDictionary["type"] = "unknown"
other.append(profileDictionary)
profileValues = { "options" : options, "variables" : variables, "documentation" : docs, "languages" : lang, "collections" : collections, "other" : other }
# add another dictionary for my metadata
profileValues["com.googlecode.mactlmgr"] = { "texliveyear" : TEXLIVE_YEAR }
plist, error = NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_(profileValues, NSPropertyListXMLFormat_v1_0, None)
plist.writeToFile_atomically_("texlive.profile.plist", False)
#print plist
#plist = NSDictionary.dictionaryWithDictionary_(pd)
#print plist
|
f8be8405ffe9b5e9251af79f0a03055c66d64b4f
|
856e9a8afcb81ae66dd998b0d2cc3556c9f315ea
|
/dexy/reporters/nodegraph/__init__.py
|
1c43c36c2ada9f5d833ee6c1c64f1c7f9dff279a
|
[
"MIT"
] |
permissive
|
dexy/dexy
|
1d5c999830de4663c05a09f4cd00b1628dfc8d46
|
323c1806e51f75435e11d2265703e68f46c8aef3
|
refs/heads/develop
| 2023-06-10T08:02:45.076551
| 2021-02-28T22:40:41
| 2021-02-28T22:40:41
| 1,506,989
| 141
| 34
|
MIT
| 2020-06-15T17:44:50
| 2011-03-21T14:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 113
|
py
|
__init__.py
|
import dexy.reporters.nodegraph.d3
import dexy.reporters.nodegraph.text
import dexy.reporters.nodegraph.graphviz
|
f645bc486d09582464794834ff51e42234c252a4
|
b2d49f04f220d46a7572bf424b6c02c7466786e8
|
/course/utils.py
|
d6a972fbc067ca02edb20ad453b3a222b5f69c42
|
[
"MIT"
] |
permissive
|
inducer/relate
|
19a7b83e07de63216918d71e875eca53ee8ceb0c
|
7c28f65ef99a6f05007c518763762aca4504145b
|
refs/heads/main
| 2023-08-29T10:32:43.952986
| 2023-08-28T15:17:16
| 2023-08-28T18:40:16
| 20,311,659
| 352
| 138
| null
| 2023-09-09T00:20:05
| 2014-05-29T23:39:42
|
Python
|
UTF-8
|
Python
| false
| false
| 45,559
|
py
|
utils.py
|
from __future__ import annotations
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import datetime # noqa
from contextlib import ContextDecorator
from typing import ( # noqa
TYPE_CHECKING, Any, Dict, FrozenSet, Iterable, List, Optional, Text, Tuple,
Union, cast,
)
import markdown
from django import http
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, render # noqa
from django.utils import translation
from django.utils.translation import gettext as _, pgettext_lazy
from course.constants import flow_permission, flow_rule_kind
from course.content import ( # noqa
CourseCommitSHADoesNotExist, FlowDesc, FlowPageDesc, FlowSessionAccessRuleDesc,
FlowSessionGradingRuleDesc, FlowSessionStartRuleDesc, get_course_commit_sha,
get_course_repo, get_flow_desc, parse_date_spec,
)
from course.page.base import PageBase, PageContext # noqa
from relate.utils import string_concat
# {{{ mypy
if TYPE_CHECKING:
from codemirror import CodeMirrorJavascript, CodeMirrorTextarea # noqa
from course.content import Repo_ish # noqa
from course.models import ( # noqa
Course, ExamTicket, FlowPageData, FlowSession, Participation,
)
from relate.utils import Repo_ish # noqa
# }}}
import re
CODE_CELL_DIV_ATTRS_RE = re.compile('(<div class="[^>]*code_cell[^>"]*")(>)')
def getattr_with_fallback(
aggregates: Iterable[Any], attr_name: str, default: Any = None) -> Any:
for agg in aggregates:
result = getattr(agg, attr_name, None)
if result is not None:
return result
return default
# {{{ flow permissions
class FlowSessionRuleBase:
pass
class FlowSessionStartRule(FlowSessionRuleBase):
def __init__(
self,
tag_session: str | None = None,
may_start_new_session: bool | None = None,
may_list_existing_sessions: bool | None = None,
default_expiration_mode: str | None = None,
) -> None:
self.tag_session = tag_session
self.may_start_new_session = may_start_new_session
self.may_list_existing_sessions = may_list_existing_sessions
self.default_expiration_mode = default_expiration_mode
class FlowSessionAccessRule(FlowSessionRuleBase):
def __init__(
self,
permissions: frozenset[str],
message: str | None = None,
) -> None:
self.permissions = permissions
self.message = message
def human_readable_permissions(self):
from course.models import FLOW_PERMISSION_CHOICES
permission_dict = dict(FLOW_PERMISSION_CHOICES)
return [permission_dict[p] for p in self.permissions]
class FlowSessionGradingRule(FlowSessionRuleBase):
def __init__(
self,
grade_identifier: str | None,
grade_aggregation_strategy: str,
due: datetime.datetime | None,
generates_grade: bool,
description: str | None = None,
credit_percent: float | None = None,
use_last_activity_as_completion_time: bool | None = None,
max_points: float | None = None,
max_points_enforced_cap: float | None = None,
bonus_points: float = 0,
) -> None:
self.grade_identifier = grade_identifier
self.grade_aggregation_strategy = grade_aggregation_strategy
self.due = due
self.generates_grade = generates_grade
self.description = description
self.credit_percent = credit_percent
self.use_last_activity_as_completion_time = \
use_last_activity_as_completion_time
self.max_points = max_points
self.max_points_enforced_cap = max_points_enforced_cap
self.bonus_points = bonus_points
def _eval_generic_conditions(
rule: Any,
course: Course,
participation: Participation | None,
now_datetime: datetime.datetime,
flow_id: str,
login_exam_ticket: ExamTicket | None,
) -> bool:
if hasattr(rule, "if_before"):
ds = parse_date_spec(course, rule.if_before)
if not (now_datetime <= ds):
return False
if hasattr(rule, "if_after"):
ds = parse_date_spec(course, rule.if_after)
if not (now_datetime >= ds):
return False
if hasattr(rule, "if_has_role"):
from course.enrollment import get_participation_role_identifiers
roles = get_participation_role_identifiers(course, participation)
if all(role not in rule.if_has_role for role in roles):
return False
if (hasattr(rule, "if_signed_in_with_matching_exam_ticket")
and rule.if_signed_in_with_matching_exam_ticket):
if login_exam_ticket is None:
return False
if login_exam_ticket.exam.flow_id != flow_id:
return False
return True
def _eval_generic_session_conditions(
rule: Any,
session: FlowSession,
now_datetime: datetime.datetime,
) -> bool:
if hasattr(rule, "if_has_tag"):
if session.access_rules_tag != rule.if_has_tag:
return False
if hasattr(rule, "if_started_before"):
ds = parse_date_spec(session.course, rule.if_started_before)
if not session.start_time < ds:
return False
return True
def _eval_participation_tags_conditions(
rule: Any,
participation: Participation | None,
) -> bool:
participation_tags_any_set = (
set(getattr(rule, "if_has_participation_tags_any", [])))
participation_tags_all_set = (
set(getattr(rule, "if_has_participation_tags_all", [])))
if participation_tags_any_set or participation_tags_all_set:
if not participation:
# Return False for anonymous users if only
# if_has_participation_tags_any or if_has_participation_tags_all
# is not empty.
return False
ptag_set = set(participation.tags.all().values_list("name", flat=True))
if not ptag_set:
return False
if (
participation_tags_any_set
and not participation_tags_any_set & ptag_set):
return False
if (
participation_tags_all_set
and not participation_tags_all_set <= ptag_set):
return False
return True
def get_flow_rules(
flow_desc: FlowDesc,
kind: str,
participation: Participation | None,
flow_id: str,
now_datetime: datetime.datetime,
consider_exceptions: bool = True,
default_rules_desc: Optional[list[Any]] = None
) -> list[Any]:
if default_rules_desc is None:
default_rules_desc = []
if (not hasattr(flow_desc, "rules")
or not hasattr(flow_desc.rules, kind)):
rules = default_rules_desc[:]
else:
rules = getattr(flow_desc.rules, kind)[:]
from course.models import FlowRuleException
if consider_exceptions:
for exc in (
FlowRuleException.objects
.filter(
participation=participation,
active=True,
kind=kind,
flow_id=flow_id)
# rules created first will get inserted first, and show up last
.order_by("creation_time")):
if exc.expiration is not None and now_datetime > exc.expiration:
continue
from relate.utils import dict_to_struct
rules.insert(0, dict_to_struct(exc.rule))
return rules
def get_session_start_rule(
course: Course,
participation: Participation | None,
flow_id: str,
flow_desc: FlowDesc,
now_datetime: datetime.datetime,
facilities: frozenset[str] | None = None,
for_rollover: bool = False,
login_exam_ticket: ExamTicket | None = None,
) -> FlowSessionStartRule:
"""Return a :class:`FlowSessionStartRule` if a new session is
permitted or *None* if no new session is allowed.
"""
if facilities is None:
facilities = frozenset()
from relate.utils import dict_to_struct
rules: list[FlowSessionStartRuleDesc] = get_flow_rules(
flow_desc, flow_rule_kind.start,
participation, flow_id, now_datetime,
default_rules_desc=[
dict_to_struct({
"may_start_new_session": True,
"may_list_existing_sessions": False})])
from course.models import FlowSession # noqa
for rule in rules:
if not _eval_generic_conditions(rule, course, participation,
now_datetime, flow_id=flow_id,
login_exam_ticket=login_exam_ticket):
continue
if not _eval_participation_tags_conditions(rule, participation):
continue
if not for_rollover and hasattr(rule, "if_in_facility"):
if rule.if_in_facility not in facilities:
continue
if not for_rollover and hasattr(rule, "if_has_in_progress_session"):
session_count = FlowSession.objects.filter(
participation=participation,
course=course,
flow_id=flow_id,
in_progress=True).count()
if bool(session_count) != rule.if_has_in_progress_session:
continue
if not for_rollover and hasattr(rule, "if_has_session_tagged"):
tagged_session_count = FlowSession.objects.filter(
participation=participation,
course=course,
access_rules_tag=rule.if_has_session_tagged,
flow_id=flow_id).count()
if not tagged_session_count:
continue
if not for_rollover and hasattr(rule, "if_has_fewer_sessions_than"):
session_count = FlowSession.objects.filter(
participation=participation,
course=course,
flow_id=flow_id).count()
if session_count >= rule.if_has_fewer_sessions_than:
continue
if not for_rollover and hasattr(rule, "if_has_fewer_tagged_sessions_than"):
tagged_session_count = FlowSession.objects.filter(
participation=participation,
course=course,
access_rules_tag__isnull=False,
flow_id=flow_id).count()
if tagged_session_count >= rule.if_has_fewer_tagged_sessions_than:
continue
return FlowSessionStartRule(
tag_session=getattr(rule, "tag_session", None),
may_start_new_session=getattr(
rule, "may_start_new_session", True),
may_list_existing_sessions=getattr(
rule, "may_list_existing_sessions", True),
default_expiration_mode=getattr(
rule, "default_expiration_mode", None),
)
return FlowSessionStartRule(
may_list_existing_sessions=False,
may_start_new_session=False)
def get_session_access_rule(
session: FlowSession,
flow_desc: FlowDesc,
now_datetime: datetime.datetime,
facilities: frozenset[str] | None = None,
login_exam_ticket: ExamTicket | None = None,
) -> FlowSessionAccessRule:
if facilities is None:
facilities = frozenset()
from relate.utils import dict_to_struct
rules: list[FlowSessionAccessRuleDesc] = get_flow_rules(
flow_desc, flow_rule_kind.access,
session.participation, session.flow_id, now_datetime,
default_rules_desc=[
dict_to_struct({
"permissions": [flow_permission.view],
})])
for rule in rules:
if not _eval_generic_conditions(
rule, session.course, session.participation,
now_datetime, flow_id=session.flow_id,
login_exam_ticket=login_exam_ticket):
continue
if not _eval_participation_tags_conditions(rule, session.participation):
continue
if not _eval_generic_session_conditions(rule, session, now_datetime):
continue
if hasattr(rule, "if_in_facility"):
if rule.if_in_facility not in facilities:
continue
if hasattr(rule, "if_in_progress"):
if session.in_progress != rule.if_in_progress:
continue
if hasattr(rule, "if_expiration_mode"):
if session.expiration_mode != rule.if_expiration_mode:
continue
if hasattr(rule, "if_session_duration_shorter_than_minutes"):
duration_min = (now_datetime - session.start_time).total_seconds() / 60
if session.participation is not None:
duration_min /= float(session.participation.time_factor)
if duration_min > rule.if_session_duration_shorter_than_minutes:
continue
permissions = set(rule.permissions)
# {{{ deal with deprecated permissions
if "modify" in permissions:
permissions.remove("modify")
permissions.update([
flow_permission.submit_answer,
flow_permission.end_session,
])
if "see_answer" in permissions:
permissions.remove("see_answer")
permissions.add(flow_permission.see_answer_after_submission)
# }}}
        # Remove submit/end-session permissions from sessions that are not in progress
if not session.in_progress:
for perm in [
flow_permission.submit_answer,
flow_permission.end_session,
]:
if perm in permissions:
permissions.remove(perm)
return FlowSessionAccessRule(
permissions=frozenset(permissions),
message=getattr(rule, "message", None)
)
return FlowSessionAccessRule(permissions=frozenset())
def get_session_grading_rule(
session: FlowSession,
flow_desc: FlowDesc,
now_datetime: datetime.datetime
) -> FlowSessionGradingRule:
flow_desc_rules = getattr(flow_desc, "rules", None)
from relate.utils import dict_to_struct
rules: list[FlowSessionGradingRuleDesc] = get_flow_rules(
flow_desc, flow_rule_kind.grading,
session.participation, session.flow_id, now_datetime,
default_rules_desc=[
dict_to_struct({
"generates_grade": False,
})])
from course.enrollment import get_participation_role_identifiers
roles = get_participation_role_identifiers(session.course, session.participation)
for rule in rules:
if hasattr(rule, "if_has_role"):
if all(role not in rule.if_has_role for role in roles):
continue
if not _eval_generic_session_conditions(rule, session, now_datetime):
continue
if not _eval_participation_tags_conditions(rule, session.participation):
continue
if hasattr(rule, "if_completed_before"):
ds = parse_date_spec(session.course, rule.if_completed_before)
use_last_activity_as_completion_time = False
if hasattr(rule, "use_last_activity_as_completion_time"):
use_last_activity_as_completion_time = \
rule.use_last_activity_as_completion_time
if use_last_activity_as_completion_time:
last_activity = session.last_activity()
if last_activity is not None:
completion_time = last_activity
else:
completion_time = now_datetime
else:
if session.in_progress:
completion_time = now_datetime
else:
completion_time = session.completion_time
if completion_time > ds:
continue
due_str = getattr(rule, "due", None)
if due_str is not None:
due = parse_date_spec(session.course, due_str)
assert due.tzinfo is not None
else:
due = None
generates_grade = getattr(rule, "generates_grade", True)
grade_identifier = None
grade_aggregation_strategy = None
if flow_desc_rules is not None:
grade_identifier = flow_desc_rules.grade_identifier
grade_aggregation_strategy = getattr(
flow_desc_rules, "grade_aggregation_strategy", None)
bonus_points = getattr_with_fallback((rule, flow_desc), "bonus_points", 0)
max_points = getattr_with_fallback((rule, flow_desc), "max_points", None)
max_points_enforced_cap = getattr_with_fallback(
(rule, flow_desc), "max_points_enforced_cap", None)
grade_aggregation_strategy = cast(str, grade_aggregation_strategy)
return FlowSessionGradingRule(
grade_identifier=grade_identifier,
grade_aggregation_strategy=grade_aggregation_strategy,
due=due,
generates_grade=generates_grade,
description=getattr(rule, "description", None),
credit_percent=getattr(rule, "credit_percent", 100),
use_last_activity_as_completion_time=getattr(
rule, "use_last_activity_as_completion_time", False),
bonus_points=bonus_points,
max_points=max_points,
max_points_enforced_cap=max_points_enforced_cap,
)
raise RuntimeError(_("grading rule determination was unable to find "
"a grading rule"))
# }}}
# {{{ contexts
class AnyArgumentType: # noqa
pass
ANY_ARGUMENT = AnyArgumentType()
class CoursePageContext:
def __init__(self, request: http.HttpRequest, course_identifier: str) -> None:
self.request = request
self.course_identifier = course_identifier
self._permissions_cache: frozenset[tuple[str, str | None]] | None = None # noqa
self._role_identifiers_cache: list[str] | None = None
self.old_language = None
        # used to prevent nested use as a context manager
self._is_in_context_manager = False
from course.models import Course # noqa
self.course = get_object_or_404(Course, identifier=course_identifier)
from course.enrollment import get_participation_for_request
self.participation = get_participation_for_request(
request, self.course)
from course.views import check_course_state
check_course_state(self.course, self.participation)
self.repo = get_course_repo(self.course)
try:
sha = get_course_commit_sha(
self.course, self.participation,
repo=self.repo,
raise_on_nonexistent_preview_commit=True)
except CourseCommitSHADoesNotExist as e:
from django.contrib import messages
messages.add_message(request, messages.ERROR, str(e))
sha = self.course.active_git_commit_sha.encode()
self.course_commit_sha = sha
def role_identifiers(self) -> list[str]:
if self._role_identifiers_cache is not None:
return self._role_identifiers_cache
from course.enrollment import get_participation_role_identifiers
self._role_identifiers_cache = get_participation_role_identifiers(
self.course, self.participation)
return self._role_identifiers_cache
def permissions(self) -> frozenset[tuple[str, str | None]]:
if self.participation is None:
if self._permissions_cache is not None:
return self._permissions_cache
from course.enrollment import get_participation_permissions
perm = get_participation_permissions(self.course, self.participation)
self._permissions_cache = perm
return perm
else:
return self.participation.permissions()
def has_permission(
self, perm: str, argument: str | AnyArgumentType | None = None
) -> bool:
if argument is ANY_ARGUMENT:
return any(perm == p
for p, arg in self.permissions())
else:
return (perm, argument) in self.permissions()
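    # Illustrative usage (permission name assumed):
    #   pctx.has_permission("view_gradebook") checks the exact
    #   (perm, None) pair, while
    #   pctx.has_permission("view_gradebook", ANY_ARGUMENT) matches the
    #   permission with any argument.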
def _set_course_lang(self, action: str) -> None:
if self.course.force_lang and self.course.force_lang.strip():
if action == "activate":
self.old_language = translation.get_language()
translation.activate(self.course.force_lang)
else:
if self.old_language is None:
# This should be a rare case, but get_language() can be None.
# See django.utils.translation.override.__exit__()
translation.deactivate_all()
else:
translation.activate(self.old_language)
def __enter__(self):
if self._is_in_context_manager:
raise RuntimeError(
"Nested use of 'course_view' as context manager "
"is not allowed.")
self._is_in_context_manager = True
self._set_course_lang(action="activate")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._is_in_context_manager = False
self._set_course_lang(action="deactivate")
self.repo.close()
class FlowContext:
def __init__(
self,
repo: Repo_ish,
course: Course,
flow_id: str,
participation: Participation | None = None) -> None:
"""*participation* and *flow_session* are not stored and only used
to figure out versioning of the flow content.
"""
self.repo = repo
self.course = course
self.flow_id = flow_id
from django.core.exceptions import ObjectDoesNotExist
self.course_commit_sha = get_course_commit_sha(
self.course, participation)
try:
self.flow_desc = get_flow_desc(self.repo, self.course,
flow_id, self.course_commit_sha)
except ObjectDoesNotExist:
raise http.Http404()
class PageOrdinalOutOfRange(http.Http404):
pass
class FlowPageContext(FlowContext):
"""This object acts as a container for all the information that a flow page
may need to render itself or respond to a POST.
Note that this is different from :class:`course.page.PageContext`,
    which is used in the page API.
"""
def __init__(
self,
repo: Repo_ish,
course: Course,
flow_id: str,
page_ordinal: int,
participation: Participation | None,
flow_session: FlowSession,
request: http.HttpRequest | None = None,
) -> None:
super().__init__(repo, course, flow_id, participation)
if page_ordinal >= flow_session.page_count:
raise PageOrdinalOutOfRange()
from course.models import FlowPageData # noqa
page_data = self.page_data = get_object_or_404(
FlowPageData, flow_session=flow_session, page_ordinal=page_ordinal)
from course.content import get_flow_page_desc
try:
self.page_desc: FlowPageDesc | None = get_flow_page_desc(
flow_session.flow_id, self.flow_desc, page_data.group_id,
page_data.page_id)
except ObjectDoesNotExist:
self.page_desc = None
self.page: PageBase | None = None
self.page_context: PageContext | None = None
else:
self.page = instantiate_flow_page_with_ctx(self, page_data)
page_uri = None
if request is not None:
from django.urls import reverse
page_uri = request.build_absolute_uri(
reverse(
"relate-view_flow_page",
args=(course.identifier, flow_session.id, page_ordinal)))
self.page_context = PageContext(
course=self.course, repo=self.repo,
commit_sha=self.course_commit_sha,
flow_session=flow_session,
page_uri=page_uri,
request=request)
self._prev_answer_visit = False
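        # False serves as a "not yet computed" sentinel below, since None is
        # a valid cached value.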
@property
def prev_answer_visit(self):
if self._prev_answer_visit is False:
from course.flow import get_prev_answer_visit
self._prev_answer_visit = get_prev_answer_visit(self.page_data)
return self._prev_answer_visit
@property
def page_ordinal(self):
return self.page_data.page_ordinal
def instantiate_flow_page_with_ctx(
fctx: FlowContext, page_data: FlowPageData) -> PageBase:
from course.content import get_flow_page_desc
page_desc = get_flow_page_desc(
fctx.flow_id, fctx.flow_desc,
page_data.group_id, page_data.page_id)
from course.content import instantiate_flow_page
return instantiate_flow_page(
"course '%s', flow '%s', page '%s/%s'"
% (fctx.course.identifier, fctx.flow_id,
page_data.group_id, page_data.page_id),
fctx.repo, page_desc, fctx.course_commit_sha)
# }}}
# {{{ utilties for course-based views
def course_view(f):
def wrapper(request, course_identifier, *args, **kwargs):
with CoursePageContext(request, course_identifier) as pctx:
response = f(pctx, *args, **kwargs)
pctx.repo.close()
return response
from functools import update_wrapper
update_wrapper(wrapper, f)
return wrapper
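# Minimal usage sketch (view and template names assumed):
#
#     @course_view
#     def view_some_page(pctx):
#         return render_course_page(pctx, "course/some-template.html", {})
#
# The wrapped view receives a CoursePageContext in place of
# (request, course_identifier).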
class ParticipationPermissionWrapper:
def __init__(self, pctx: CoursePageContext) -> None:
self.pctx = pctx
def __getitem__(self, perm: str) -> bool:
from course.constants import participation_permission
try:
getattr(participation_permission, perm)
except AttributeError:
raise ValueError("permission name '%s' not valid" % perm)
return self.pctx.has_permission(perm, ANY_ARGUMENT)
def __iter__(self):
raise TypeError("ParticipationPermissionWrapper is not iterable.")
def render_course_page(
pctx: CoursePageContext, template_name: str, args: dict[str, Any],
allow_instant_flow_requests: bool = True) -> http.HttpResponse:
args = args.copy()
from course.views import get_now_or_fake_time
now_datetime = get_now_or_fake_time(pctx.request)
if allow_instant_flow_requests:
from course.models import InstantFlowRequest
instant_flow_requests = list(InstantFlowRequest.objects
.filter(
course=pctx.course,
start_time__lte=now_datetime,
end_time__gte=now_datetime,
cancelled=False)
.order_by("start_time"))
else:
instant_flow_requests = []
args.update({
"course": pctx.course,
"pperm": ParticipationPermissionWrapper(pctx),
"participation": pctx.participation,
"num_instant_flow_requests": len(instant_flow_requests),
"instant_flow_requests":
[(i+1, r) for i, r in enumerate(instant_flow_requests)],
})
return render(pctx.request, template_name, args)
# }}}
# {{{ page cache
class PageInstanceCache:
"""Caches instances of :class:`course.page.Page`."""
def __init__(self, repo, course, flow_id):
self.repo = repo
self.course = course
self.flow_id = flow_id
self.flow_desc_cache = {}
self.page_cache = {}
def get_flow_desc_from_cache(self, commit_sha):
try:
return self.flow_desc_cache[commit_sha]
except KeyError:
flow_desc = get_flow_desc(self.repo, self.course,
self.flow_id, commit_sha)
self.flow_desc_cache[commit_sha] = flow_desc
return flow_desc
def get_page(self, group_id, page_id, commit_sha):
key = (group_id, page_id, commit_sha)
try:
return self.page_cache[key]
except KeyError:
from course.content import get_flow_page_desc, instantiate_flow_page
page_desc = get_flow_page_desc(
self.flow_id,
self.get_flow_desc_from_cache(commit_sha),
group_id, page_id)
page = instantiate_flow_page(
location="flow '%s', group, '%s', page '%s'"
% (self.flow_id, group_id, page_id),
repo=self.repo, page_desc=page_desc,
commit_sha=commit_sha)
self.page_cache[key] = page
return page
# }}}
# {{{ codemirror config
def get_codemirror_widget(
language_mode: str,
interaction_mode: Optional[str],
config: dict | None = None,
addon_css: tuple = (),
addon_js: tuple = (),
dependencies: tuple = (),
read_only: bool = False,
autofocus: bool = False,
additional_keys: Optional[
Dict[str, Union[str, CodeMirrorJavascript]]] = None,
attrs: Optional[Dict[str, str]] = None,
) -> tuple[CodeMirrorTextarea, str]:
from codemirror import CodeMirrorJavascript, CodeMirrorTextarea # noqa
if additional_keys is None:
additional_keys = {}
theme = "default"
if read_only:
theme += " relate-readonly"
from django.urls import reverse
help_text = (_("Press F9 to toggle full-screen mode. ")
+ _("Set editor mode in <a href='%s'>user profile</a>.")
% reverse("relate-user_profile"))
actual_addon_css = (
"dialog/dialog",
"display/fullscreen",
) + addon_css
actual_addon_js = (
"search/searchcursor",
"dialog/dialog",
"search/search",
"comment/comment",
"edit/matchbrackets",
"display/fullscreen",
"selection/active-line",
"edit/trailingspace",
) + addon_js
if language_mode == "python":
indent_unit = 4
else:
indent_unit = 2
extra_keys = {
"Ctrl-/": "toggleComment",
"Tab": CodeMirrorJavascript("""function(cm)
{
// from https://github.com/codemirror/CodeMirror/issues/988
if (cm.doc.somethingSelected()) {
return CodeMirror.Pass;
}
var spacesPerTab = cm.getOption("indentUnit");
var spacesToInsert = (
spacesPerTab
- (cm.doc.getCursor("start").ch % spacesPerTab));
var spaces = Array(spacesToInsert + 1).join(" ");
cm.replaceSelection(spaces, "end", "+input");
}"""),
"Shift-Tab": "indentLess",
"F9": CodeMirrorJavascript("""function(cm) {
cm.setOption("fullScreen",
!cm.getOption("fullScreen"));
}"""),
}
extra_keys.update(additional_keys)
actual_config = {
"fixedGutter": True,
"matchBrackets": True,
"styleActiveLine": True,
"showTrailingSpace": True,
"indentUnit": indent_unit,
"readOnly": read_only,
"extraKeys": extra_keys,
}
if autofocus:
actual_config["autofocus"] = True
if interaction_mode == "vim":
actual_config["vimMode"] = True
actual_addon_js += ("../keymap/vim",)
elif interaction_mode == "emacs":
actual_config["keyMap"] = "emacs"
actual_addon_js += ("../keymap/emacs",)
elif interaction_mode == "sublime":
actual_config["keyMap"] = "sublime"
actual_addon_js += ("../keymap/sublime",)
# every other interaction mode goes to default
if config is not None:
actual_config.update(config)
if attrs is None:
attrs = {}
return CodeMirrorTextarea(
mode=language_mode,
dependencies=dependencies,
theme=theme,
addon_css=actual_addon_css,
addon_js=actual_addon_js,
config=actual_config,
attrs=attrs), help_text
# }}}
# {{{ facility processing
def get_facilities_config(
request: http.HttpRequest | None = None
) -> dict[str, dict[str, Any]] | None:
from django.conf import settings
# This is called during offline validation, where Django isn't really set up.
# The getattr makes this usable.
facilities = getattr(settings, "RELATE_FACILITIES", None)
if facilities is None:
# Only happens during offline validation. Suppresses errors there.
return None
if callable(facilities):
from course.views import get_now_or_fake_time
now_datetime = get_now_or_fake_time(request)
result = facilities(now_datetime)
if not isinstance(result, dict):
raise RuntimeError("RELATE_FACILITIES must return a dictionary")
return result
else:
return facilities
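# A minimal sketch of the expected RELATE_FACILITIES shape (values assumed);
# a callable taking a datetime and returning such a dict is also accepted:
#
#     RELATE_FACILITIES = {
#         "test_center": {
#             "ip_ranges": ["192.168.192.0/24"],
#         },
#     }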
class FacilityFindingMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
pretend_facilities = request.session.get("relate_pretend_facilities")
if pretend_facilities is not None:
facilities = pretend_facilities
else:
import ipaddress
remote_address = ipaddress.ip_address(
str(request.META["REMOTE_ADDR"]))
facilities = set()
for name, props in get_facilities_config(request).items():
ip_ranges = props.get("ip_ranges", [])
for ir in ip_ranges:
if remote_address in ipaddress.ip_network(str(ir)):
facilities.add(name)
request.relate_facilities = frozenset(facilities)
return self.get_response(request)
# }}}
def get_col_contents_or_empty(row, index):
if index >= len(row):
return ""
else:
return row[index]
def csv_data_importable(file_contents, column_idx_list, header_count):
import csv
spamreader = csv.reader(file_contents)
n_header_row = 0
try:
        row0 = next(spamreader)
except Exception as e:
err_msg = type(e).__name__
err_str = str(e)
if err_msg == "Error":
err_msg = ""
else:
err_msg += ": "
err_msg += err_str
if "line contains NUL" in err_str:
err_msg = err_msg.rstrip(".") + ". "
# This message changed over time.
# Make the message uniform to please the tests.
err_msg = err_msg.replace("NULL byte", "NUL")
err_msg += _("Are you sure the file is a CSV file other "
"than a Microsoft Excel file?")
return False, (
string_concat(
pgettext_lazy("Starting of Error message", "Error"),
": %s" % err_msg))
from itertools import chain
for row in chain([row0], spamreader):
n_header_row += 1
if n_header_row <= header_count:
continue
try:
for column_idx in column_idx_list:
if column_idx is not None:
str(get_col_contents_or_empty(row, column_idx-1))
except UnicodeDecodeError:
return False, (
_("Error: Columns to be imported contain "
"non-ASCII characters. "
"Please save your CSV file as utf-8 encoded "
"and import again.")
)
except Exception as e:
return False, (
string_concat(
pgettext_lazy("Starting of Error message",
"Error"),
": %(err_type)s: %(err_str)s")
% {
"err_type": type(e).__name__,
"err_str": str(e)}
)
return True, ""
def will_use_masked_profile_for_email(
recipient_email: None | str | list[str]) -> bool:
if not recipient_email:
return False
if not isinstance(recipient_email, list):
recipient_email = [recipient_email]
from course.models import Participation # noqa
    recipient_participations = (
Participation.objects.filter(
user__email__in=recipient_email
))
from course.constants import participation_permission as pperm
    for part in recipient_participations:
if part.has_permission(pperm.view_participant_masked_profile):
return True
return False
def get_course_specific_language_choices() -> tuple[tuple[str, Any], ...]:
from collections import OrderedDict
from django.conf import settings
all_options = ((settings.LANGUAGE_CODE, None),) + tuple(settings.LANGUAGES)
filtered_options_dict = OrderedDict(all_options)
def get_default_option() -> tuple[str, str]:
# For the default language used, if USE_I18N is True, display
# "Disabled". Otherwise display its lang info.
if not settings.USE_I18N:
formatted_descr = (
get_formatted_options(settings.LANGUAGE_CODE, None)[1])
else:
formatted_descr = _("disabled (i.e., displayed language is "
"determined by user's browser preference)")
return "", string_concat("%s: " % _("Default"), formatted_descr)
def get_formatted_options(
lang_code: str, lang_descr: str | None) -> tuple[str, str]:
if lang_descr is None:
lang_descr = OrderedDict(settings.LANGUAGES).get(lang_code)
if lang_descr is None:
try:
lang_info = translation.get_language_info(lang_code)
lang_descr = lang_info["name_translated"]
except KeyError:
return (lang_code.strip(), lang_code)
return (lang_code.strip(),
string_concat(_(lang_descr), " (%s)" % lang_code))
filtered_options = (
[get_default_option()]
+ [get_formatted_options(k, v)
for k, v in filtered_options_dict.items()])
# filtered_options[1] is the option for settings.LANGUAGE_CODE
# it's already displayed when settings.USE_I18N is False
if not settings.USE_I18N:
filtered_options.pop(1)
return tuple(filtered_options)
class LanguageOverride(ContextDecorator):
def __init__(self, course: Course, deactivate: bool = False) -> None:
self.course = course
self.deactivate = deactivate
if course.force_lang:
self.language = course.force_lang
else:
from django.conf import settings
self.language = settings.RELATE_ADMIN_EMAIL_LOCALE
def __enter__(self) -> None:
self.old_language = translation.get_language()
if self.language is not None:
translation.activate(self.language)
else:
translation.deactivate_all()
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
if self.old_language is None:
translation.deactivate_all()
elif self.deactivate:
translation.deactivate()
else:
translation.activate(self.old_language)
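# Illustrative usage: LanguageOverride is a ContextDecorator, so both forms
# work (function name assumed):
#
#     with LanguageOverride(course):
#         send_notification_email(...)
#
#     @LanguageOverride(course)
#     def send_notification_email(...): ...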
class RelateJinjaMacroBase:
def __init__(
self,
course: Course | None,
repo: Repo_ish,
commit_sha: bytes) -> None:
self.course = course
self.repo = repo
self.commit_sha = commit_sha
@property
def name(self):
# The name of the method used in the template
raise NotImplementedError()
def __call__(self, *args: Any, **kwargs: Any) -> str:
raise NotImplementedError()
# {{{ ipynb utilities
class IpynbJinjaMacro(RelateJinjaMacroBase):
name = "render_notebook_cells"
def _render_notebook_cells(self,
ipynb_path: str,
indices: Any | None = None,
clear_output: bool | None = False,
clear_markdown: bool | None = False,
**kwargs: Any) -> str:
from course.content import get_repo_blob_data_cached
try:
ipynb_source = get_repo_blob_data_cached(self.repo, ipynb_path,
self.commit_sha).decode()
return self._render_notebook_from_source(
ipynb_source,
indices=indices,
clear_output=clear_output,
clear_markdown=clear_markdown,
**kwargs
)
except ObjectDoesNotExist:
raise
__call__ = _render_notebook_cells # type: ignore
def _render_notebook_from_source(
self, ipynb_source: str, indices: Any | None = None,
clear_output: bool | None = False,
clear_markdown: bool | None = False, **kwargs: Any) -> str:
"""
Get HTML format of ipython notebook so as to be rendered in RELATE flow
pages.
:param ipynb_source: the :class:`text` read from a ipython notebook.
:param indices: a :class:`list` instance, 0-based indices of notebook cells
which are expected to be rendered.
:param clear_output: a :class:`bool` instance, indicating whether existing
execution output of code cells should be removed.
:param clear_markdown: a :class:`bool` instance, indicating whether markdown
cells will be ignored..
:return:
"""
import nbformat
from nbformat.reader import parse_json
nb_source_dict = parse_json(ipynb_source)
if indices:
nb_source_dict.update(
{"cells": [nb_source_dict["cells"][idx] for idx in indices]})
if clear_markdown:
nb_source_dict.update(
{"cells": [cell for cell in nb_source_dict["cells"]
if cell["cell_type"] != "markdown"]})
nb_source_dict.update({"cells": nb_source_dict["cells"]})
import json
ipynb_source = json.dumps(nb_source_dict)
notebook = nbformat.reads(ipynb_source, as_version=4)
from traitlets.config import Config
c = Config()
        # Prevent execution of arbitrary code from the notebook
c.ExecutePreprocessor.enabled = False
if clear_output:
c.ClearOutputPreprocessor.enabled = True
c.CSSHTMLHeaderPreprocessor.enabled = False
c.HighlightMagicsPreprocessor.enabled = False
import os
        # Look up the template in the course template directory
import course
template_path = os.path.join(
os.path.dirname(course.__file__),
"templates", "course", "jinja2")
c.TemplateExporter.template_path.append(template_path)
from nbconvert import HTMLExporter
html_exporter = HTMLExporter(
config=c,
template_file="nbconvert_template.tpl"
)
(body, resources) = html_exporter.from_notebook_node(notebook)
return "<div class='relate-notebook-container'>%s</div>" % body
NBCONVERT_PRE_OPEN_RE = re.compile(r"<pre\s*>\s*<relate_ipynb\s*>")
NBCONVERT_PRE_CLOSE_RE = re.compile(r"</relate_ipynb\s*>\s*</pre\s*>")
class NBConvertHTMLPostprocessor(markdown.postprocessors.Postprocessor):
def run(self, text):
text = NBCONVERT_PRE_OPEN_RE.sub("", text)
text = NBCONVERT_PRE_CLOSE_RE.sub("", text)
return text
class NBConvertExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals): # noqa
md.postprocessors["relate_nbconvert"] = NBConvertHTMLPostprocessor(md)
# }}}
# vim: foldmethod=marker
|
4433e2e892d5a82744045ded297ae6a9cda53b99
|
af368ad82efda90ca9de73c57f2822aa27a21044
|
/rigl/experimental/jax/pruning/init_test.py
|
d066d01c161f89c698fe02fe04d0c2e71dcc68f0
|
[
"Apache-2.0"
] |
permissive
|
google-research/rigl
|
e24f05bfd872f31194a047cf1b3a0bfa12ab45aa
|
d39fc7d46505cb3196cb1edeb32ed0b6dd44c0f9
|
refs/heads/master
| 2023-08-25T04:54:29.014303
| 2023-01-13T13:40:32
| 2023-01-26T17:47:13
| 224,050,000
| 324
| 61
|
Apache-2.0
| 2022-07-04T22:02:04
| 2019-11-25T22:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 8,068
|
py
|
init_test.py
|
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for weight_symmetry.pruning.init."""
from typing import Any, Mapping, Optional
from absl.testing import absltest
import flax
import jax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import init
from rigl.experimental.jax.pruning import masked
class MaskedDense(flax.deprecated.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
            mask=None):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.deprecated.nn.Dense,
mask=layer_mask,
kernel_init=flax.deprecated.nn.initializers.kaiming_normal())
class MaskedDenseSparseInit(flax.deprecated.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
            mask=None,
**kwargs):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.deprecated.nn.Dense,
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
class MaskedCNN(flax.deprecated.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
            mask=None):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.deprecated.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=flax.deprecated.nn.initializers.kaiming_normal())
class MaskedCNNSparseInit(flax.deprecated.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
            mask=None,
**kwargs):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.deprecated.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
class InitTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._rng = jax.random.PRNGKey(42)
self._batch_size = 2
self._input_shape = ((self._batch_size, 28, 28, 1), jnp.float32)
self._input = jnp.ones(*self._input_shape)
def test_init_kaiming_sparse_normal_output(self):
"""Tests the output shape/type of kaiming normal sparse initialization."""
input_array = jnp.ones((64, 16), jnp.float32)
mask = jax.random.bernoulli(self._rng, shape=(64, 16))
base_init = flax.deprecated.nn.initializers.kaiming_normal()(
self._rng, input_array.shape, input_array.dtype)
sparse_init = init.kaiming_sparse_normal(mask)(self._rng, input_array.shape,
input_array.dtype)
with self.subTest(name='test_sparse_init_output_shape'):
self.assertSequenceEqual(sparse_init.shape, base_init.shape)
with self.subTest(name='test_sparse_init_output_dtype'):
self.assertEqual(sparse_init.dtype, base_init.dtype)
with self.subTest(name='test_sparse_init_output_notallzero'):
self.assertTrue((sparse_init != 0).any())
def test_dense_no_mask(self):
"""Checks that in the special case of no mask, init is same as base_init."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.deprecated.nn.Model(MaskedDense, initial_params)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=None)
self._masked_model_sparse_init = flax.deprecated.nn.Model(
MaskedDenseSparseInit, initial_params)
self.assertTrue(
jnp.isclose(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'], self._unmasked_model.params['MaskedModule_0']
['unmasked']['kernel']).all())
def test_dense_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for dense layer."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.deprecated.nn.Model(MaskedDense, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.deprecated.nn.Model(
MaskedDenseSparseInit, initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
    with self.subTest(name='test_dense_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
    with self.subTest(name='test_dense_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
def test_cnn_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for convolutional layer."""
_, initial_params = MaskedCNN.init_by_shape(self._rng, (self._input_shape,))
self._unmasked_model = flax.deprecated.nn.Model(MaskedCNN, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedCNNSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.deprecated.nn.Model(
MaskedCNNSparseInit, initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
with self.subTest(name='test_cnn_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
with self.subTest(name='test_cnn_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
if __name__ == '__main__':
absltest.main()
|
e1000982b953f148f8b16a46d4d1c80f1ef5bb58
|
a3c44a5f34c8fd2c26ddcc7593e265035a847c7e
|
/cuda_functions/nms_3D/pth_nms.py
|
3639b5bddb1655de0220aa066253ad21b4fd98db
|
[
"Apache-2.0"
] |
permissive
|
MIC-DKFZ/medicaldetectiontoolkit
|
2531b3342305428a0499018563e1e2eab1cf86ef
|
6753237cc4bae558a94b919735d545a2de075e07
|
refs/heads/master
| 2023-07-26T00:34:00.406394
| 2022-04-04T08:29:54
| 2022-04-04T08:29:54
| 152,747,947
| 714
| 180
|
Apache-2.0
| 2023-07-18T12:33:26
| 2018-10-12T12:34:57
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
pth_nms.py
|
import torch
from ._ext import nms
def nms_gpu(dets, thresh):
"""
dets has to be a tensor
"""
scores = dets[:, -1]
order = scores.sort(0, descending=True)[1]
dets = dets[order].contiguous()
keep = torch.LongTensor(dets.size(0))
num_out = torch.LongTensor(1)
nms.gpu_nms(keep, num_out, dets, thresh)
return order[keep[:num_out[0]].cuda()].contiguous()
def nms_cpu(dets, thresh):
dets = dets.cpu()
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
z1 = dets[:, 4]
z2 = dets[:, 5]
scores = dets[:, 6]
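    # Despite the name, these are 3D box volumes (inclusive, +1-pixel
    # coordinate convention).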
    areas = (x2 - x1 + 1) * (y2 - y1 + 1) * (z2 - z1 + 1)
order = scores.sort(0, descending=True)[1]
keep = torch.LongTensor(dets.size(0))
num_out = torch.LongTensor(1)
nms.cpu_nms(keep, num_out, dets, order, areas, thresh)
return keep[:num_out[0]]
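# Minimal usage sketch (shapes assumed): for a FloatTensor `dets` of shape
# (N, 7) laid out as [x1, y1, x2, y2, z1, z2, score] per the slicing above,
#     keep = nms_cpu(dets, thresh=0.5)
# returns the indices of the boxes retained after suppression.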
|
07dc554743359a3b9175ff0c823bf6531e8675ab
|
a28d672c50faf9632983287d206e8691282cab51
|
/basicsr/archs/basicvsr_arch.py
|
ed7b824eae108a9bcca57f1c14dd0d8afafc4f58
|
[
"Apache-2.0"
] |
permissive
|
XPixelGroup/BasicSR
|
42cf240fbc91bee10cfa12930ab86820969e854c
|
033cd6896d898fdd3dcda32e3102a792efa1b8f4
|
refs/heads/master
| 2023-06-07T15:16:21.940587
| 2023-02-02T07:07:47
| 2023-02-02T07:07:47
| 130,259,654
| 2,088
| 300
|
Apache-2.0
| 2023-09-14T00:50:17
| 2018-04-19T18:58:00
|
Python
|
UTF-8
|
Python
| false
| false
| 12,595
|
py
|
basicvsr_arch.py
|
import torch
from torch import nn as nn
from torch.nn import functional as F
from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import ResidualBlockNoBN, flow_warp, make_layer
from .edvr_arch import PCDAlignment, TSAFusion
from .spynet_arch import SpyNet
@ARCH_REGISTRY.register()
class BasicVSR(nn.Module):
"""A recurrent network for video SR. Now only x4 is supported.
Args:
num_feat (int): Number of channels. Default: 64.
        num_block (int): Number of residual blocks for each branch. Default: 15.
spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
"""
def __init__(self, num_feat=64, num_block=15, spynet_path=None):
super().__init__()
self.num_feat = num_feat
# alignment
self.spynet = SpyNet(spynet_path)
# propagation
self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
self.forward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
# reconstruction
self.fusion = nn.Conv2d(num_feat * 2, num_feat, 1, 1, 0, bias=True)
self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(2)
# activation functions
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def get_flow(self, x):
b, n, c, h, w = x.size()
x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)
return flows_forward, flows_backward
def forward(self, x):
"""Forward function of BasicVSR.
Args:
x: Input frames with shape (b, n, c, h, w). n is the temporal dimension / number of frames.
"""
flows_forward, flows_backward = self.get_flow(x)
b, n, _, h, w = x.size()
# backward branch
out_l = []
feat_prop = x.new_zeros(b, self.num_feat, h, w)
for i in range(n - 1, -1, -1):
x_i = x[:, i, :, :, :]
if i < n - 1:
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, feat_prop], dim=1)
feat_prop = self.backward_trunk(feat_prop)
out_l.insert(0, feat_prop)
# forward branch
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, n):
x_i = x[:, i, :, :, :]
if i > 0:
flow = flows_forward[:, i - 1, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, feat_prop], dim=1)
feat_prop = self.forward_trunk(feat_prop)
# upsample
out = torch.cat([out_l[i], feat_prop], dim=1)
out = self.lrelu(self.fusion(out))
out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
out = self.lrelu(self.conv_hr(out))
out = self.conv_last(out)
base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
out += base
out_l[i] = out
return torch.stack(out_l, dim=1)
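# Rough shape sketch (values assumed): for input x of shape (b, n, 3, h, w),
# BasicVSR()(x) has shape (b, n, 3, 4 * h, 4 * w); the two PixelShuffle(2)
# stages provide the x4 upscale mentioned in the class docstring.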
class ConvResidualBlocks(nn.Module):
"""Conv and residual block used in BasicVSR.
Args:
num_in_ch (int): Number of input channels. Default: 3.
num_out_ch (int): Number of output channels. Default: 64.
num_block (int): Number of residual blocks. Default: 15.
"""
def __init__(self, num_in_ch=3, num_out_ch=64, num_block=15):
super().__init__()
self.main = nn.Sequential(
nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1, bias=True), nn.LeakyReLU(negative_slope=0.1, inplace=True),
make_layer(ResidualBlockNoBN, num_block, num_feat=num_out_ch))
def forward(self, fea):
return self.main(fea)
@ARCH_REGISTRY.register()
class IconVSR(nn.Module):
"""IconVSR, proposed also in the BasicVSR paper.
Args:
num_feat (int): Number of channels. Default: 64.
num_block (int): Number of residual blocks for each branch. Default: 15.
keyframe_stride (int): Keyframe stride. Default: 5.
temporal_padding (int): Temporal padding. Default: 2.
spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
edvr_path (str): Path to the pretrained EDVR model. Default: None.
"""
def __init__(self,
num_feat=64,
num_block=15,
keyframe_stride=5,
temporal_padding=2,
spynet_path=None,
edvr_path=None):
super().__init__()
self.num_feat = num_feat
self.temporal_padding = temporal_padding
self.keyframe_stride = keyframe_stride
# keyframe_branch
self.edvr = EDVRFeatureExtractor(temporal_padding * 2 + 1, num_feat, edvr_path)
# alignment
self.spynet = SpyNet(spynet_path)
# propagation
self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
self.forward_trunk = ConvResidualBlocks(2 * num_feat + 3, num_feat, num_block)
# reconstruction
self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(2)
# activation functions
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def pad_spatial(self, x):
"""Apply padding spatially.
Since the PCD module in EDVR requires that the resolution is a multiple
of 4, we apply padding to the input LR images if their resolution is
not divisible by 4.
Args:
x (Tensor): Input LR sequence with shape (n, t, c, h, w).
Returns:
Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).
"""
n, t, c, h, w = x.size()
pad_h = (4 - h % 4) % 4
pad_w = (4 - w % 4) % 4
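        # e.g. h == 30 gives pad_h == 2, while h == 32 gives pad_h == 0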
# padding
x = x.view(-1, c, h, w)
x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
return x.view(n, t, c, h + pad_h, w + pad_w)
def get_flow(self, x):
b, n, c, h, w = x.size()
x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)
return flows_forward, flows_backward
def get_keyframe_feature(self, x, keyframe_idx):
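        # Pad temporally with mirrored frame indices (e.g. frames [4, 3] are
        # prepended for temporal_padding == 2), so EDVR sees a full window at
        # the sequence boundaries.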
if self.temporal_padding == 2:
x = [x[:, [4, 3]], x, x[:, [-4, -5]]]
elif self.temporal_padding == 3:
x = [x[:, [6, 5, 4]], x, x[:, [-5, -6, -7]]]
x = torch.cat(x, dim=1)
num_frames = 2 * self.temporal_padding + 1
feats_keyframe = {}
for i in keyframe_idx:
feats_keyframe[i] = self.edvr(x[:, i:i + num_frames].contiguous())
return feats_keyframe
def forward(self, x):
b, n, _, h_input, w_input = x.size()
x = self.pad_spatial(x)
h, w = x.shape[3:]
keyframe_idx = list(range(0, n, self.keyframe_stride))
if keyframe_idx[-1] != n - 1:
keyframe_idx.append(n - 1) # last frame is a keyframe
# compute flow and keyframe features
flows_forward, flows_backward = self.get_flow(x)
feats_keyframe = self.get_keyframe_feature(x, keyframe_idx)
# backward branch
out_l = []
feat_prop = x.new_zeros(b, self.num_feat, h, w)
for i in range(n - 1, -1, -1):
x_i = x[:, i, :, :, :]
if i < n - 1:
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
if i in keyframe_idx:
feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
feat_prop = self.backward_fusion(feat_prop)
feat_prop = torch.cat([x_i, feat_prop], dim=1)
feat_prop = self.backward_trunk(feat_prop)
out_l.insert(0, feat_prop)
# forward branch
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, n):
x_i = x[:, i, :, :, :]
if i > 0:
flow = flows_forward[:, i - 1, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
if i in keyframe_idx:
feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
feat_prop = self.forward_fusion(feat_prop)
feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)
feat_prop = self.forward_trunk(feat_prop)
# upsample
out = self.lrelu(self.pixel_shuffle(self.upconv1(feat_prop)))
out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
out = self.lrelu(self.conv_hr(out))
out = self.conv_last(out)
base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
out += base
out_l[i] = out
return torch.stack(out_l, dim=1)[..., :4 * h_input, :4 * w_input]
class EDVRFeatureExtractor(nn.Module):
"""EDVR feature extractor used in IconVSR.
Args:
num_input_frame (int): Number of input frames.
        num_feat (int): Number of feature channels.
load_path (str): Path to the pretrained weights of EDVR. Default: None.
"""
def __init__(self, num_input_frame, num_feat, load_path):
        super().__init__()
self.center_frame_idx = num_input_frame // 2
# extract pyramid features
self.conv_first = nn.Conv2d(3, num_feat, 3, 1, 1)
self.feature_extraction = make_layer(ResidualBlockNoBN, 5, num_feat=num_feat)
self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
# pcd and tsa module
self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=8)
self.fusion = TSAFusion(num_feat=num_feat, num_frame=num_input_frame, center_frame_idx=self.center_frame_idx)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
if load_path:
self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])
def forward(self, x):
b, n, c, h, w = x.size()
# extract features for each frame
# L1
feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
feat_l1 = self.feature_extraction(feat_l1)
# L2
feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
# L3
feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))
feat_l1 = feat_l1.view(b, n, -1, h, w)
feat_l2 = feat_l2.view(b, n, -1, h // 2, w // 2)
feat_l3 = feat_l3.view(b, n, -1, h // 4, w // 4)
# PCD alignment
ref_feat_l = [ # reference feature list
feat_l1[:, self.center_frame_idx, :, :, :].clone(), feat_l2[:, self.center_frame_idx, :, :, :].clone(),
feat_l3[:, self.center_frame_idx, :, :, :].clone()
]
aligned_feat = []
for i in range(n):
nbr_feat_l = [ # neighboring feature list
feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(), feat_l3[:, i, :, :, :].clone()
]
aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
aligned_feat = torch.stack(aligned_feat, dim=1) # (b, t, c, h, w)
# TSA fusion
return self.fusion(aligned_feat)
|
1078f30357bdce1417a9f9f7f31e4f61488877d0
|
68a76875beffd7636bb6913f8e97b83b1638a3cb
|
/flaskshop/plugin/models.py
|
95a0babc484ad41e42cacc1cfe78771c8dc1bf92
|
[] |
permissive
|
hjlarry/flask-shop
|
57d0d7f4ee82753041bdfbbcdd8c75517cffffc3
|
3ef48ed3b1899438df9ca9ae4a8ca8c722eab1f7
|
refs/heads/master
| 2023-07-20T07:19:13.168919
| 2023-07-16T09:35:18
| 2023-07-16T09:35:18
| 137,964,415
| 257
| 117
|
BSD-3-Clause
| 2023-07-16T09:35:19
| 2018-06-20T01:30:23
|
Python
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
models.py
|
from flask import current_app
from flaskshop.database import Column, Model, db
class PluginRegistry(Model):
__tablename__ = "plugin_registry"
name = Column(db.String(100), unique=True)
enabled = Column(db.Boolean(), default=True)
@property
def info(self):
return current_app.pluggy.plugin_metadata.get(self.name, {})
|
f7b5b2b6dda36f77a6cb45eef1ab8b8ce9adae7b
|
1634f33c5021e8465a695fb5244504e2eeeecff5
|
/kitsune/messages/__init__.py
|
c67a9c3694ac9c38fa7e34f5a8f483fe2e0877c4
|
[] |
permissive
|
mozilla/kitsune
|
fee4b8598eb01f5b4add00f2f010b45e2a6ca901
|
67ec527bfc32c715bf9f29d5e01362c4903aebd2
|
refs/heads/main
| 2023-09-01T21:41:59.076570
| 2023-08-31T22:34:05
| 2023-08-31T22:34:05
| 489,645
| 1,218
| 697
|
BSD-3-Clause
| 2023-09-14T08:43:19
| 2010-01-26T18:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
__init__.py
|
# The number of threads per page.
MESSAGES_PER_PAGE = 20
|
6f57ad62dcb8c02c509a0ce319de6e525e3081fc
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/collective/fleet/static_model_parallel_by_row.py
|
93c76ea71afb4c415091a1a7fd6bbfef8b073bdd
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
static_model_parallel_by_row.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main
import paddle
from paddle import fluid
from paddle.distributed import fleet
paddle.enable_static()
DTYPE = "float32"
MODEL_PARALLEL_SIZE = 2
IN_SIZE = 2 * MODEL_PARALLEL_SIZE
OUT_SIZE = 2 * MODEL_PARALLEL_SIZE
# Fix seed for test
# fluid.default_startup_program().random_seed = 1
# fluid.default_main_program().random_seed = 1
def get_param_attr(weight, bias):
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Assign(weight)
)
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(bias))
return weight_attr, bias_attr
def create_model(data, rank):
np.random.seed(2021)
np_weight = np.random.uniform(-1, 1, size=(IN_SIZE, OUT_SIZE)).astype(DTYPE)
np_bias = np.random.uniform(-1, 1, size=(OUT_SIZE,)).astype(DTYPE)
if rank is not None:
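        # Row-parallel split: each of the MODEL_PARALLEL_SIZE ranks keeps its
        # own IN_SIZE // 2 rows of the full weight matrix (axis=0 below).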
start_row = 0 if rank == 0 else IN_SIZE // 2
np_weight_part = np_weight[start_row : start_row + IN_SIZE // 2, :]
weight_attr, bias_attr = get_param_attr(np_weight_part, np_bias)
result = paddle.distributed.split(
data,
size=(IN_SIZE, OUT_SIZE),
operation='linear',
axis=0,
num_partitions=MODEL_PARALLEL_SIZE,
weight_attr=weight_attr,
bias_attr=bias_attr,
)
else:
weight_attr, bias_attr = get_param_attr(np_weight, np_bias)
result = paddle.static.nn.fc(
data,
size=OUT_SIZE,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Assign(np_weight)
),
bias_attr=bias_attr,
)
predict = paddle.sum(result)
return predict
class TestModelParallel(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
# Input data
data_in = paddle.static.data(
name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE
)
if dist_strategy:
data_loader = fluid.io.DataLoader.from_generator(
feed_list=[data_in],
capacity=64,
use_double_buffer=False,
iterable=False,
)
if dist_strategy:
fleet.init(is_collective=True)
strategy = fleet.DistributedStrategy()
strategy.tensor_parallel = True
strategy.tensor_parallel_configs = {'tensor_parallel_degree': 2}
rank = fleet.worker_index() if dist_strategy else None
avg_cost = create_model(data_in, rank)
opt = paddle.optimizer.SGD(0.1)
if dist_strategy:
dist_opt = fleet.distributed_optimizer(
optimizer=opt, strategy=strategy
)
dist_opt.minimize(avg_cost)
else:
opt.minimize(avg_cost)
def gen_data():
np.random.seed(2021)
while True:
data = [np.random.random([IN_SIZE]).astype(DTYPE)]
yield data
train_reader = paddle.batch(gen_data, batch_size=batch_size)
if dist_strategy:
return None, avg_cost, train_reader, None, None, None, data_loader
else:
return None, avg_cost, train_reader, None, None, None
if __name__ == "__main__":
runtime_main(TestModelParallel)
|
3707464767049dde979c43e11362e2687e75c8cb
|
33fc96c5fcc983bb312518cfbe766e95173fb895
|
/setup.py
|
9a2095b63e9aa556f66aade864a9d021d07c3e22
|
[
"MIT"
] |
permissive
|
jr-robotics/robo-gym
|
47439a319a40ed159ebdfed510e98f9e0a9d7611
|
93386e52d259399e3939dcef0e96fa5208c020e3
|
refs/heads/master
| 2022-05-06T07:54:11.917706
| 2022-03-09T15:02:26
| 2022-03-09T15:02:26
| 252,740,567
| 360
| 67
|
MIT
| 2020-05-26T11:32:49
| 2020-04-03T13:29:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
setup.py
|
import setuptools
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'robo_gym'))
from version import VERSION
setuptools.setup(name='robo-gym',
version=VERSION,
description='robo-gym: an open source toolkit for Distributed Deep Reinforcement Learning on real and simulated robots.',
url='https://github.com/jr-robotics/robo-gym',
author="Matteo Lucchi, Friedemann Zindler",
author_email="matteo.lucchi@joanneum.at, friedemann.zindler@joanneum.at",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=[
'gym',
'robo-gym-server-modules',
'numpy',
'scipy',
'pyyaml'
],
python_requires='>=3.6',
scripts = ['bin/run-rs-side-standard']
)
|
579c871867222e8fe561647e17f0bb2ba44d339f
|
34cf89f633059d0e5f2a444992c23278232aa3e3
|
/stix2/test/v20/test_base.py
|
18d3a50ab48b2fa71aced4a5ea9619b1c1021693
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
oasis-open/cti-python-stix2
|
e578fe17e42216bf2635781511be8d0b8612fcc0
|
f1c1632f3aa916cfa30b0b3625200f01c12dc5ed
|
refs/heads/master
| 2023-08-08T17:40:47.992285
| 2023-06-06T17:51:34
| 2023-06-06T17:51:34
| 81,590,907
| 336
| 112
|
BSD-3-Clause
| 2023-08-17T17:41:02
| 2017-02-10T17:50:12
|
Python
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
test_base.py
|
import datetime as dt
import json
import pytest
import pytz
from stix2.base import STIXJSONEncoder
def test_encode_json_datetime():
now = dt.datetime(2017, 3, 22, 0, 0, 0, tzinfo=pytz.UTC)
test_dict = {'now': now}
expected = '{"now": "2017-03-22T00:00:00Z"}'
assert json.dumps(test_dict, cls=STIXJSONEncoder) == expected
def test_encode_json_object():
obj = object()
test_dict = {'obj': obj}
with pytest.raises(TypeError) as excinfo:
json.dumps(test_dict, cls=STIXJSONEncoder)
assert " is not JSON serializable" in str(excinfo.value)
|
e5f72cc609e301b869f666f0f5218c55d827c4bf
|
501e0774c35d8fa087e59e72f834cca3da463893
|
/beaver/tests/test_kinesis_transport.py
|
dbea972ecd6b0ae92490809766c4ec6c056a0fca
|
[
"MIT"
] |
permissive
|
python-beaver/python-beaver
|
c98ba58257b9f651baa6da1213701cfd05a07479
|
c9b63350c435f1f4c8aff35acd5af6d0d7fbb73f
|
refs/heads/master
| 2023-09-01T03:52:08.700279
| 2023-05-30T01:07:39
| 2023-05-30T01:07:39
| 5,171,841
| 133
| 48
|
MIT
| 2023-08-24T00:42:30
| 2012-07-24T22:05:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,777
|
py
|
test_kinesis_transport.py
|
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import mock
import tempfile
import logging
import beaver
from beaver.config import BeaverConfig
from beaver.transports import create_transport
from beaver.unicode_dammit import unicode_dammit
from fixtures import Fixture
from moto import mock_kinesis
import boto.kinesis
class KinesisTests(unittest.TestCase):
@mock_kinesis
def _create_streams(self):
conn = boto.kinesis.connect_to_region("us-east-1")
conn.create_stream("stream1", 1)
conn.create_stream("stream2", 1)
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger(__name__)
empty_conf = tempfile.NamedTemporaryFile(delete=True)
cls.beaver_config = BeaverConfig(mock.Mock(config=empty_conf.name))
cls.beaver_config.set('transport', 'kinesis')
cls.beaver_config.set('logstash_version', 1)
output_file = Fixture.download_official_distribution()
Fixture.extract_distribution(output_file)
@mock_kinesis
def test_kinesis_default_auth_profile(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_profile_name', None)
self.beaver_config.set('kinesis_aws_access_key', None)
self.beaver_config.set('kinesis_aws_secret_key', None)
self.beaver_config.set('kinesis_aws_stream', 'stream1')
transport = create_transport(self.beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
transport.interrupt()
@mock_kinesis
def test_kinesis_auth_profile(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_profile_name', 'beaver_stream')
self.beaver_config.set('kinesis_aws_access_key', None)
self.beaver_config.set('kinesis_aws_secret_key', None)
self.beaver_config.set('kinesis_aws_stream', 'stream1')
transport = create_transport(self.beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
@mock_kinesis
def test_kinesis_auth_key(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_profile_name', None)
self.beaver_config.set('kinesis_aws_access_key', 'beaver_test_key')
self.beaver_config.set('kinesis_aws_secret_key', 'beaver_test_secret')
self.beaver_config.set('kinesis_aws_stream', 'stream1')
transport = create_transport(self.beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
transport.interrupt()
@mock_kinesis
def test_kinesis_auth_account_id(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_stream_owner_acct_id', 'abc123')
self.beaver_config.set('kinesis_aws_profile_name', None)
self.beaver_config.set('kinesis_aws_access_key', 'beaver_test_key')
self.beaver_config.set('kinesis_aws_secret_key', 'beaver_test_secret')
self.beaver_config.set('kinesis_aws_stream', 'stream1')
transport = create_transport(self.beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
transport.interrupt()
@mock_kinesis
def test_kinesis_send_stream(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_stream', 'stream1')
self.beaver_config.set('kinesis_aws_profile_name', None)
self.beaver_config.set('kinesis_aws_access_key', None)
self.beaver_config.set('kinesis_aws_secret_key', None)
self.beaver_config.set('kinesis_bulk_lines', False)
transport = create_transport(self.beaver_config, logger=self.logger)
mock_send_batch = mock.Mock()
transport._send_message_batch = mock_send_batch
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
data = {}
lines = []
        n = 500
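        # 500 lines fill exactly one batch; the per-call record cutoff
        # (assumed to be 500) is exercised by the 501-line test below.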
for i in range(n):
lines.append('log' + str(i) + '\n')
new_lines = []
for line in lines:
message = unicode_dammit(line)
if len(message) == 0:
continue
new_lines.append(message)
data['lines'] = new_lines
data['fields'] = []
self.assertTrue(transport.callback("test.log", **data))
self.assertEqual(1, mock_send_batch.call_count)
@mock_kinesis
def test_kinesis_send_stream_with_record_count_cutoff(self):
self._create_streams()
self.beaver_config.set('kinesis_aws_stream', 'stream1')
self.beaver_config.set('kinesis_aws_profile_name', None)
self.beaver_config.set('kinesis_aws_access_key', None)
self.beaver_config.set('kinesis_aws_secret_key', None)
self.beaver_config.set('kinesis_bulk_lines', False)
transport = create_transport(self.beaver_config, logger=self.logger)
mock_send_batch = mock.Mock()
transport._send_message_batch = mock_send_batch
self.assertIsInstance(transport, beaver.transports.kinesis_transport.KinesisTransport)
data = {}
lines = []
n = 501
for i in range(n):
lines.append('log' + str(i) + '\n')
new_lines = []
for line in lines:
message = unicode_dammit(line)
if len(message) == 0:
continue
new_lines.append(message)
data['lines'] = new_lines
data['fields'] = []
self.assertTrue(transport.callback("test.log", **data))
self.assertEqual(2, mock_send_batch.call_count)
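# A minimal standalone sketch (not part of the original suite) of the batching
# arithmetic the two tests above exercise: with kinesis_bulk_lines disabled the
# transport is observed to flush in batches of 500 records, so 500 lines cost
# one _send_message_batch call and 501 cost two. The batch size of 500 is an
# assumption inferred from these tests, not from the transport's documentation.
def expected_batch_count(num_records, batch_size=500):
    # ceiling division: number of batches needed for num_records
    return (num_records + batch_size - 1) // batch_size

assert expected_batch_count(500) == 1
assert expected_batch_count(501) == 2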
|
ba99a4218b8a6acee049fd3c2e9ef71794961369
|
0841643267b9fc1478f6e3d21bfccb17aba67af6
|
/gs_quant/backtests/data_sources.py
|
21f88beec244b46358640e258a9e1598ae11235f
|
[
"Apache-2.0"
] |
permissive
|
goldmansachs/gs-quant
|
55618e0e4e961d4ee50b7393f27c258e2647a957
|
4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2
|
refs/heads/master
| 2023-08-20T00:55:43.324547
| 2023-08-16T16:55:22
| 2023-08-16T16:55:22
| 161,840,815
| 2,088
| 596
|
Apache-2.0
| 2023-08-16T16:55:23
| 2018-12-14T21:10:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,958
|
py
|
data_sources.py
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
from enum import Enum
import numpy as np
import pandas as pd
import pytz
from typing import Union, Iterable
from gs_quant.backtests.core import ValuationFixingType
from gs_quant.data import DataFrequency, Dataset
from gs_quant.instrument import Instrument
class MissingDataStrategy(Enum):
fill_forward = 'fill_forward'
interpolate = 'interpolate'
fail = 'fail'
class DataSource:
def get_data(self, state):
raise RuntimeError("Implemented by subclass")
class GsDataSource(DataSource):
def __init__(self, data_set: str, asset_id: str, min_date: dt.date = None, max_date: dt.date = None,
value_header: str = 'rate'):
self._data_set = data_set
self._asset_id = asset_id
self._min_date = min_date
self._max_date = max_date
self._value_header = value_header
self._loaded_data = None
def get_data(self, state: Union[dt.date, dt.datetime] = None):
if self._loaded_data is None:
ds = Dataset(self._data_set)
if self._min_date:
self._loaded_data = ds.get_data(self._min_date, self._max_date, assetId=(self._asset_id,))
else:
return ds.get_data(state, state, assetId=(self._asset_id,))[self._value_header]
return self._loaded_data[self._value_header].at[pd.to_datetime(state)]
class GenericDataSource(DataSource):
def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy = MissingDataStrategy.fail):
"""
A data source which holds a pandas series indexed by date or datetime
        :param data_set: a pandas series indexed by date or datetime
        :param missing_data_strategy: MissingDataStrategy defining the behaviour when data is missing. It only takes
                                      effect in get_data; get_data_range makes no assumptions about the number of
                                      data points returned.
"""
self._data_set = data_set
self._missing_data_strategy = missing_data_strategy
self._tz_aware = isinstance(self._data_set.index[0],
dt.datetime) and self._data_set.index[0].tzinfo is not None
        if self._missing_data_strategy == MissingDataStrategy.interpolate:
            self._data_set = self._data_set.interpolate()  # assign back: pandas returns a new object
        elif self._missing_data_strategy == MissingDataStrategy.fill_forward:
            self._data_set = self._data_set.ffill()  # assign back: pandas returns a new object
def get_data(self, state: Union[dt.date, dt.datetime, Iterable]):
"""
        Get the value of the dataset at a time or date. If a list of dates or times is provided,
        return a list of values, one per entry.
        :param state: a date, datetime or a list of dates or datetimes
        :return: float value, or a list of float values when a list of states is given
"""
if isinstance(state, Iterable):
return [self.get_data(i) for i in state]
if self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):
state = pytz.utc.localize(state)
if pd.Timestamp(state) in self._data_set:
return self._data_set[pd.Timestamp(state)]
elif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:
return self._data_set[state]
else:
if isinstance(self._data_set.index, pd.DatetimeIndex):
self._data_set.at[pd.to_datetime(state)] = np.nan
self._data_set.sort_index(inplace=True)
else:
self._data_set.at[state] = np.nan
                self._data_set = self._data_set.sort_index()
if self._missing_data_strategy == MissingDataStrategy.interpolate:
self._data_set = self._data_set.interpolate()
elif self._missing_data_strategy == MissingDataStrategy.fill_forward:
self._data_set = self._data_set.ffill()
else:
raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')
return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index,
pd.DatetimeIndex) else self._data_set[state]
def get_data_range(self, start: Union[dt.date, dt.datetime],
end: Union[dt.date, dt.datetime, int]):
"""
get a range of values from the dataset.
:param start: a date or datetime
:param end: a date, datetime or an int. If an int is provided we return that many data points back from the
start date
:return: pd.Series
"""
if isinstance(end, int):
return self._data_set.loc[self._data_set.index < start].tail(end)
return self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]
class DataManager:
def __init__(self):
self._data_sources = {}
def add_data_source(self, series: Union[pd.Series, DataSource], data_freq: DataFrequency,
instrument: Instrument, valuation_type: ValuationFixingType):
if not isinstance(series, DataSource) and not len(series):
return
if instrument.name is None:
            raise RuntimeError('Please add a name to identify your instrument')
key = (data_freq, instrument.name, valuation_type)
if key in self._data_sources:
            raise RuntimeError('A data source with this frequency, instrument name and valuation type has already '
                               'been added to the Data Manager')
self._data_sources[key] = GenericDataSource(series) if isinstance(series, pd.Series) else series
def get_data(self, state: Union[dt.date, dt.datetime], instrument: Instrument, valuation_type: ValuationFixingType):
key = (DataFrequency.REAL_TIME if isinstance(state, dt.datetime) else DataFrequency.DAILY,
instrument.name.split('_')[-1], valuation_type)
return self._data_sources[key].get_data(state)
def get_data_range(self, start: Union[dt.date, dt.datetime],
end: Union[dt.date, dt.datetime], instrument: Instrument, valuation_type: ValuationFixingType):
key = (DataFrequency.REAL_TIME if isinstance(start, dt.datetime) else DataFrequency.DAILY,
instrument.name.split('_')[-1], valuation_type)
return self._data_sources[key].get_data_range(start, end)
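# Illustrative usage (an addition, not part of the original module): a minimal
# sketch of driving GenericDataSource with an ordinary pandas Series. It relies
# only on the classes defined above; the dates and values are invented.
if __name__ == '__main__':
    series = pd.Series([1.0, 2.0, 4.0],
                       index=pd.to_datetime(['2021-01-01', '2021-01-02', '2021-01-04']))
    source = GenericDataSource(series, MissingDataStrategy.interpolate)
    # A date present in the index is returned directly.
    print(source.get_data(dt.date(2021, 1, 2)))  # 2.0
    # A missing date is filled in according to the strategy (interpolated here).
    print(source.get_data(dt.date(2021, 1, 3)))  # 3.0
    # get_data_range slices between two dates (exclusive of the start date).
    print(source.get_data_range(dt.date(2021, 1, 1), dt.date(2021, 1, 4)))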
|
13e41dbe78ab93e0aa2801421ec46742214bec07
|
64ab5b65afdf8d950c4b56ad2259133b95fc2fec
|
/zeus/api/resources/__init__.py
|
67ecb6af9e16bc3040d0ebaec6ff695cffb0e5ef
|
[
"Apache-2.0"
] |
permissive
|
getsentry/zeus
|
3e88895443b23278fdb4c25121422ee214630512
|
6d4a490c19ebe406b551641a022ca08f26c21fcb
|
refs/heads/master
| 2023-09-01T14:20:11.396306
| 2021-04-30T17:08:33
| 2021-04-30T17:08:33
| 96,131,433
| 222
| 27
|
Apache-2.0
| 2022-06-01T03:17:16
| 2017-07-03T16:39:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
__init__.py
|
from .artifact_download import * # NOQA
from .auth_index import * # NOQA
from .build_artifacts import * # NOQA
from .build_bundlestats import * # NOQA
from .build_details import * # NOQA
from .build_diff import * # NOQA
from .build_failures import * # NOQA
from .build_file_coverage_tree import * # NOQA
from .build_file_coverage import * # NOQA
from .build_index import * # NOQA
from .build_jobs import * # NOQA
from .build_styleviolations import * # NOQA
from .build_tests import * # NOQA
from .catchall import * # NOQA
from .change_request_details import * # NOQA
from .change_request_index import * # NOQA
from .github_organizations import * # NOQA
from .github_repositories import * # NOQA
from .hook_details import * # NOQA
from .index import * # NOQA
from .install_index import * # NOQA
from .install_stats import * # NOQA
from .job_artifacts import * # NOQA
from .job_details import * # NOQA
from .job_tests import * # NOQA
from .repository_branches import * # NOQA
from .repository_builds import * # NOQA
from .repository_change_requests import * # NOQA
from .repository_details import * # NOQA
from .repository_file_coverage_tree import * # NOQA
from .repository_hooks import * # NOQA
from .repository_index import * # NOQA
from .repository_revisions import * # NOQA
from .repository_stats import * # NOQA
from .repository_test_details import * # NOQA
from .repository_test_history import * # NOQA
from .repository_tests import * # NOQA
from .repository_testtree import * # NOQA
from .revision_artifacts import * # NOQA
from .revision_bundlestats import * # NOQA
from .revision_details import * # NOQA
from .revision_diff import * # NOQA
from .revision_failures import * # NOQA
from .revision_file_coverage_tree import * # NOQA
from .revision_file_coverage import * # NOQA
from .revision_jobs import * # NOQA
from .revision_styleviolations import * # NOQA
from .revision_tests import * # NOQA
from .test_details import * # NOQA
from .user_details import * # NOQA
from .user_emails import * # NOQA
from .user_token import * # NOQA
|
f0aac02ebce6f4caed26adc876096572f2cc3e6a
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py
|
94cb5ffca543a731a9e313c5d91c028bdaf1cdcc
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 13,076
|
py
|
test_add_subtract_multiply.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for Add, Subtract and Multiply."""
import numpy as np
import tvm
from tvm import te
import tvm.topi.hexagon.slice_ops as sl
import tvm.topi.hexagon.qnn as qn
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import (
transform_numpy,
quantize_np,
get_hexagon_target,
)
ZERO_POINT_A_VAL = None
SCALE_A_VAL = None
ZERO_POINT_B_VAL = None
SCALE_B_VAL = None
ZERO_POINT_M_VAL = None
SCALE_M_VAL = None
def hexagon_wrapper_allocation(
device,
layout,
axis_separators,
tensor_shape=None,
data_original=None,
transformed_data=None,
dtype=None,
):
"""Input layout can either be nhwc-8h2w32c2w-2d or nhwc"""
if layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"]:
data_nd = allocate_hexagon_array(
device,
tensor_shape=tensor_shape,
data=transformed_data,
dtype=dtype,
axis_separators=axis_separators,
mem_scope="global.vtcm",
)
elif layout == "nhwc":
data_nd = allocate_hexagon_array(
device,
data=data_original,
)
    else:
        raise RuntimeError(f"Unexpected layout '{layout}'")
    return data_nd
class TestAddSubtractMultiplyBroadcast2d:
"""Test Add, Subtract and Multiply class."""
(
input_shape_a,
input_shape_b,
input_a_layout,
input_b_layout,
output_layout,
dtype,
) = tvm.testing.parameters(
# no broadcast needed - short input
(
[1, 8, 4, 32],
[1, 8, 4, 32],
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"float16",
),
# no broadcast needed - large input
(
[1, 56, 64, 128],
[1, 56, 64, 128],
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"float16",
),
# one input needs broadcast
(
[1, 56, 64, 128],
[1, 1, 64, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# Both input needs broadcast
(
[1, 56, 1, 128],
[1, 1, 64, 1],
"nhwc",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# One axis in one input needs broadcast
(
[1, 56, 20, 128],
[1, 56, 20, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# broadcast all axes in one input
(
[1, 48, 56, 32],
[1, 1, 1, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
(
[1, 48, 32, 64],
[1, 48, 32, 64],
"nhwc-8h8w32c-2d",
"nhwc-8h8w32c-2d",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 2 in one input
(
[1, 48, 32, 64],
[1, 48, 1, 64],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 1 in one input
(
[1, 48, 32, 64],
[1, 1, 32, 64],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 3 in one input
(
[1, 8, 8, 32],
[1, 8, 8, 1],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 56, 1, 128],
[1, 1, 64, 1],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 48, 1, 1],
[1, 1, 32, 32],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 48, 1, 32],
[1, 1, 32, 1],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast all axes in one input
(
[1, 48, 56, 32],
[1, 1, 1, 1],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
)
op_name = tvm.testing.parameter("add", "subtract", "multiply")
@tvm.testing.fixture
def expected_output_np(self, input_np_a, input_np_b, op_name):
"""Generate expected output."""
if op_name == "add":
out_ref = np.add(input_np_a, input_np_b)
elif op_name == "subtract":
out_ref = np.subtract(input_np_a, input_np_b)
elif op_name == "multiply":
out_ref = np.multiply(input_np_a, input_np_b)
return out_ref
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, output_layout, dtype):
"""Generate expected output."""
if dtype == "float16":
return transform_numpy(expected_output_np, "nhwc", output_layout)
if dtype in ["uint8", "int8"]:
global ZERO_POINT_M_VAL, SCALE_M_VAL
out_ref_quantized, SCALE_M_VAL, ZERO_POINT_M_VAL = quantize_np(
expected_output_np, dtype
)
return transform_numpy(out_ref_quantized, "nhwc", output_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def input_np_a(self, input_shape_a, dtype):
"""Generate numpy input for variable a."""
if dtype in ["uint8", "int8"]:
dtype = "float32"
return np.random.random(input_shape_a).astype(dtype)
@tvm.testing.fixture
def input_np_b(self, input_shape_b, dtype):
"""Generate numpy input for variable b."""
if dtype in ["uint8", "int8"]:
dtype = "float32"
return np.random.random(input_shape_b).astype(dtype)
@tvm.testing.fixture
def quantize_input_np_a(self, input_np_a, dtype):
if dtype in ["uint8", "int8"]:
global ZERO_POINT_A_VAL, SCALE_A_VAL
input_np_a_quantized, SCALE_A_VAL, ZERO_POINT_A_VAL = quantize_np(input_np_a, dtype)
return input_np_a_quantized
return None
@tvm.testing.fixture
def quantize_input_np_b(self, input_np_b, dtype):
if dtype in ["uint8", "int8"]:
global ZERO_POINT_B_VAL, SCALE_B_VAL
input_np_b_quantized, SCALE_B_VAL, ZERO_POINT_B_VAL = quantize_np(input_np_b, dtype)
return input_np_b_quantized
return None
@tvm.testing.fixture
def transformed_input_np_a(self, input_np_a, quantize_input_np_a, input_a_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_a, "nhwc", input_a_layout)
if dtype in ["uint8", "int8"]:
return transform_numpy(quantize_input_np_a, "nhwc", input_a_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def transformed_input_np_b(self, input_np_b, quantize_input_np_b, input_b_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_b, "nhwc", input_b_layout)
if dtype in ["uint8", "int8"]:
return transform_numpy(quantize_input_np_b, "nhwc", input_b_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.requires_hexagon
def test_transform(
self,
dtype,
input_shape_a,
input_shape_b,
input_np_a,
input_np_b,
quantize_input_np_a,
quantize_input_np_b,
transformed_input_np_a,
transformed_input_np_b,
expected_output_np,
transformed_expected_output_np,
hexagon_session,
output_layout,
input_a_layout,
input_b_layout,
op_name,
):
"""Test transform."""
output_shape = expected_output_np.shape
a_tensor = te.placeholder(input_shape_a, name="a_tensor", dtype=dtype)
b_tensor = te.placeholder(input_shape_b, name="b_tensor", dtype=dtype)
if dtype == "float16":
if op_name == "add":
m_tensor = sl.add_broadcast_compute(a_tensor, b_tensor)
elif op_name == "subtract":
m_tensor = sl.subtract_broadcast_compute(a_tensor, b_tensor)
elif op_name == "multiply":
m_tensor = sl.multiply_broadcast_compute(a_tensor, b_tensor)
tir_schedule = sl.tir_broadcast_schedule(
m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout, op_name
)
elif dtype in ["uint8", "int8"]:
args = [
a_tensor,
b_tensor,
output_shape,
ZERO_POINT_A_VAL,
SCALE_A_VAL,
ZERO_POINT_B_VAL,
SCALE_B_VAL,
ZERO_POINT_M_VAL,
SCALE_M_VAL,
dtype,
]
if op_name == "add":
m_tensor = qn.qadd_broadcast_compute(*args)
elif op_name == "subtract":
m_tensor = qn.qsubtract_broadcast_compute(*args)
elif op_name == "multiply":
m_tensor = qn.qmultiply_broadcast_compute(*args)
tir_schedule = qn.tir_schedule_quant(
m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout
)
sch = tir_schedule.mod
input_axis_separator = [4]
if output_layout in (
"nhwc-8h2w32c2w-2d",
"nhwc-8h8w32c-2d",
):
output_axis_separator = [4]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[a_tensor, b_tensor, m_tensor],
get_hexagon_target("v69"),
name="slice_op_with_transform",
)
if dtype == "float16":
in_data_np_a = input_np_a
in_data_np_b = input_np_b
elif dtype in ["int8", "uint8"]:
in_data_np_a = quantize_input_np_a
in_data_np_b = quantize_input_np_b
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
a_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=input_a_layout,
data_original=in_data_np_a,
transformed_data=transformed_input_np_a,
axis_separators=input_axis_separator,
)
b_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=input_b_layout,
data_original=in_data_np_b,
transformed_data=transformed_input_np_b,
axis_separators=input_axis_separator,
)
m_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=output_layout,
tensor_shape=transformed_expected_output_np.shape,
axis_separators=output_axis_separator,
dtype=dtype,
)
mod = hexagon_session.load_module(func)
mod(a_data_nd, b_data_nd, m_data_nd)
batch, height, width, channel = output_shape
# convert nd to np and reshape to fixed chunk size layout
if output_layout == "nhwc-8h2w32c2w-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch, height // 8, width // 4, channel // 32, 8, 2, 32, 2]
)
elif output_layout == "nhwc-8h8w32c-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch, height // 8, width // 8, channel // 32, 8, 8, 32]
)
if dtype == "float16":
np.testing.assert_allclose(
transformed_expected_output_np, m_data_np, rtol=1e-3, atol=1e-3
)
elif dtype in ["int8", "uint8"]:
np.testing.assert_allclose(transformed_expected_output_np, m_data_np, rtol=1, atol=1)
if __name__ == "__main__":
tvm.testing.main()
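# A small self-contained sketch (an assumption, not code from this repo's
# ``infrastructure`` module) of the affine quantization quantize_np is
# understood to perform above: float data is mapped onto uint8 through a
# scale and zero point, which is what the ZERO_POINT_* / SCALE_* globals
# capture for the two inputs and the output.
def _affine_quantize_uint8_sketch(data):
    lo, hi = float(np.min(data)), float(np.max(data))
    scale = (hi - lo) / 255.0 if hi > lo else 1.0
    zero_point = int(round(-lo / scale))
    quantized = np.clip(np.round(data / scale) + zero_point, 0, 255).astype("uint8")
    return quantized, scale, zero_point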
|
f83f02f00ba1369326a1dfd3cc298dd485c61653
|
69d8d91954f6623f3674d52d734d589f72383628
|
/openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py
|
c71f19c3cf33a6be263067d8b8a273844fc916bd
|
[
"Apache-2.0"
] |
permissive
|
openstack/horizon
|
d031cebe126c06ad9717bbc52790b3d890e8661e
|
7896fd8c77a6766a1156a520946efaf792b76ca5
|
refs/heads/master
| 2023-09-04T06:57:58.069907
| 2023-09-01T20:17:10
| 2023-09-01T20:17:10
| 2,665,166
| 1,060
| 1,175
|
Apache-2.0
| 2023-08-07T02:33:44
| 2011-10-28T13:12:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,337
|
py
|
forms.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
KEY_NAME_REGEX = re.compile(r"^[a-zA-Z0-9-_:. /]+$", re.UNICODE)
KEY_ERROR_MESSAGES = {
    'invalid': _("The key must match the following regex: "
                 "'^[a-zA-Z0-9-_:. /]+$'")}
class CreateKeyValuePair(forms.SelfHandlingForm):
    # this is for creating a spec key-value pair for an existing QOS Spec
key = forms.RegexField(max_length=255, label=_("Key"),
regex=KEY_NAME_REGEX,
error_messages=KEY_ERROR_MESSAGES)
value = forms.CharField(max_length=255, label=_("Value"))
def handle(self, request, data):
qos_spec_id = self.initial['qos_spec_id']
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get(request, qos_spec_id)
# now add new key-value pair to list of specs
specs.specs[data['key']] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Created spec "%s".') % data['key']
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to create spec."),
redirect=redirect)
class EditKeyValuePair(forms.SelfHandlingForm):
value = forms.CharField(max_length=255, label=_("Value"))
# update the backend with the new qos spec value
def handle(self, request, data):
key = self.initial['key']
qos_spec_id = self.initial['qos_spec_id']
# build up new 'specs' object with all previous values plus new value
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get_keys(request,
qos_spec_id,
raw=True)
specs.specs[key] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Saved spec "%s".') % key
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to edit spec."),
redirect=redirect)
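# Illustrative check (an addition, not part of the original module) of what
# KEY_NAME_REGEX above accepts: letters, digits, spaces and - _ : . / only.
# The sample key names below are invented.
if __name__ == "__main__":
    for candidate in ("read_iops_sec", "total bytes/sec", "bad*key"):
        print(candidate, bool(KEY_NAME_REGEX.match(candidate)))
    # read_iops_sec True, total bytes/sec True, bad*key False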
|
2133c0da23cbabb8996cf130e06ce45171c183a7
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/tests/series/methods/test_to_csv.py
|
76ca05a60eb7a02c4d71b4b8c68b410edb2d1fe5
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,332
|
py
|
test_to_csv.py
|
from datetime import datetime
from io import StringIO
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.io.common import get_handle
class TestSeriesToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "header": None}
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params).squeeze("columns")
if header is None:
out.name = out.index.name = None
return out
def test_from_csv(self, datetime_series, string_series):
# freq doesn't round-trip
datetime_series.index = datetime_series.index._with_freq(None)
with tm.ensure_clean() as path:
datetime_series.to_csv(path, header=False)
ts = self.read_csv(path, parse_dates=True)
tm.assert_series_equal(datetime_series, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
# see gh-10483
datetime_series.to_csv(path, header=True)
ts_h = self.read_csv(path, header=0)
assert ts_h.name == "ts"
string_series.to_csv(path, header=False)
series = self.read_csv(path)
tm.assert_series_equal(string_series, series, check_names=False)
assert series.name is None
assert series.index.name is None
string_series.to_csv(path, header=True)
series_h = self.read_csv(path, header=0)
assert series_h.name == "series"
with open(path, "w", encoding="utf-8") as outfile:
outfile.write("1998-01-01|1.0\n1999-01-01|2.0")
series = self.read_csv(path, sep="|", parse_dates=True)
check_series = Series(
{datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0}
)
tm.assert_series_equal(check_series, series)
series = self.read_csv(path, sep="|", parse_dates=False)
check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0})
tm.assert_series_equal(check_series, series)
def test_to_csv(self, datetime_series):
with tm.ensure_clean() as path:
datetime_series.to_csv(path, header=False)
with open(path, newline=None, encoding="utf-8") as f:
lines = f.readlines()
assert lines[1] != "\n"
datetime_series.to_csv(path, index=False, header=False)
arr = np.loadtxt(path)
tm.assert_almost_equal(arr, datetime_series.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series(["\u05d0", "d2"], index=["\u05d0", "\u05d1"])
s.to_csv(buf, encoding="UTF-8", header=False)
buf.seek(0)
s2 = self.read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_series_equal(s, s2)
def test_to_csv_float_format(self):
with tm.ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format="%.2f", header=False)
rs = self.read_csv(filename)
xp = Series([0.12, 0.23, 0.57])
tm.assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(["jack and jill", "jesse and frank"])
split = s.str.split(r"\s+and\s+")
buf = StringIO()
split.to_csv(buf, header=False)
def test_to_csv_path_is_none(self):
# GH 8215
# Series.to_csv() was returning None, inconsistent with
# DataFrame.to_csv() which returned string
s = Series([1, 2, 3])
csv_str = s.to_csv(path_or_buf=None, header=False)
assert isinstance(csv_str, str)
@pytest.mark.parametrize(
"s,encoding",
[
(
Series([0.123456, 0.234567, 0.567567], index=["A", "B", "C"], name="X"),
None,
),
# GH 21241, 21118
(Series(["abc", "def", "ghi"], name="X"), "ascii"),
(Series(["123", "你好", "世界"], name="中文"), "gb2312"),
(
Series(["123", "Γειά σου", "Κόσμε"], name="Ελληνικά"), # noqa: RUF001
"cp737",
),
],
)
def test_to_csv_compression(self, s, encoding, compression):
with tm.ensure_clean() as filename:
s.to_csv(filename, compression=compression, encoding=encoding, header=True)
# test the round trip - to_csv -> read_csv
result = pd.read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_series_equal(s, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
s.to_csv(handles.handle, encoding=encoding, header=True)
result = pd.read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_series_equal(s, result)
# explicitly ensure file was compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
assert s.name in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_series_equal(
s,
pd.read_csv(fh, index_col=0, encoding=encoding).squeeze("columns"),
)
def test_to_csv_interval_index(self):
# GH 28210
s = Series(["foo", "bar", "baz"], index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
s.to_csv(path, header=False)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = s.copy()
expected.index = expected.index.astype(str)
tm.assert_series_equal(result, expected)
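# A minimal standalone sketch (not one of the original tests) of the
# round-trip pattern this class uses throughout: Series.to_csv writes a
# two-column CSV and read_csv(...).squeeze("columns") turns it back into
# a Series.
def test_to_csv_roundtrip_sketch():
    ser = Series([1.5, 2.5], index=["a", "b"], name="x")
    with tm.ensure_clean() as path:
        ser.to_csv(path, header=True)
        result = pd.read_csv(path, index_col=0).squeeze("columns")
        tm.assert_series_equal(ser, result)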
|
ba7e6b77066f5ecc4de1d423ed52324e72c3fd90
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Spacy/source2.7/spacy/lang/id/norm_exceptions.py
|
cb168dfeb7f623f79600ccbcb6c474a014a32b38
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
norm_exceptions.py
|
# coding: utf8
from __future__ import unicode_literals
_exc = {
"Rp": "$",
"IDR": "$",
"RMB": "$",
"USD": "$",
"AUD": "$",
"GBP": "$",
}
NORM_EXCEPTIONS = {}
for string, norm in _exc.items():
NORM_EXCEPTIONS[string] = norm
NORM_EXCEPTIONS[string.title()] = norm
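# Illustrative lookups (an addition, not part of the original file): the loop
# above registers both the raw and the title-cased spelling of every key, so
# "IDR" and its title-cased form "Idr" normalize identically.
assert NORM_EXCEPTIONS["IDR"] == "$"
assert NORM_EXCEPTIONS["Idr"] == "$"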
|
1660127964196ace001c495054d720809fe7cba8
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/feedback/apps.py
|
f80490c8f69bcb66142e73ce907a07d2f8bb38f2
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
apps.py
|
"""App.
"""
from django.apps import AppConfig
class FeedbackConfig(AppConfig):
"""App Config."""
name = "vision_on_edge.feedback"
|
8351357b3aa2ea8fac3a2660b1f46b8d2ee02884
|
9e1f60a867f66b1f4e4fc84fa4252c581e5e1a36
|
/Chapter10/service/libs/web/setup.py
|
830e43c309acc64d08585b312b94b8505fd087eb
|
[
"MIT"
] |
permissive
|
PacktPublishing/Clean-Code-in-Python
|
c216e002485b8cd7736f97b59215a3930f35359a
|
7348d0f9f42871f499b352e0696e0cef51c4f8c6
|
refs/heads/master
| 2023-06-10T13:40:33.331115
| 2023-05-30T17:48:09
| 2023-05-30T17:48:09
| 145,072,942
| 523
| 181
|
MIT
| 2023-05-30T17:48:10
| 2018-08-17T04:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 450
|
py
|
setup.py
|
from setuptools import find_packages, setup
with open("README.rst", "r") as longdesc:
long_description = longdesc.read()
install_requires = ["sanic"]
setup(
name="web",
description="Library with helpers for the web-related functionality",
long_description=long_description,
author="Dev team",
version="0.1.0",
packages=find_packages(where="src/"),
package_dir={"": "src"},
install_requires=install_requires,
)
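# Usage note (an addition, not part of the original file): with the src/
# layout declared above, a typical editable install from the directory
# containing this setup.py would be
#
#     pip install -e .
#
# which also pulls in the single runtime dependency, sanic.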
|
cfc8dc00c143f4f3809730df3044986b311d24e1
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/rename/overloadsAndImplementationInImportedModuleRenameCall/after/a.py
|
70e147726e0e433745ea08a218370d821edbb8a2
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
a.py
|
from b import bar
bar("5")
|
627563a06ce61eec50ae556a3af7942f46d7e9f4
|
867364dc92d3236f5b42aa4fe82ee69d008d09e5
|
/insomniac/actions_types.py
|
7633434c0521fc1b686df9d95cfff6acb57be8ae
|
[
"MIT"
] |
permissive
|
alexal1/Insomniac
|
6acde5a6e4b4d50e4e0d4fb233fb2e0f98d52314
|
03e25aeaae5b38a0e47a4dfd705a3140ff2e8086
|
refs/heads/master
| 2023-09-03T16:56:23.546483
| 2022-09-03T14:21:08
| 2022-09-03T14:21:08
| 268,484,843
| 666
| 194
|
MIT
| 2022-03-01T23:12:28
| 2020-06-01T09:55:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
actions_types.py
|
from collections import namedtuple
from enum import unique, Enum
GetProfileAction = namedtuple('GetProfileAction', 'user')
LikeAction = namedtuple('LikeAction', 'source_name source_type user')
FollowAction = namedtuple('FollowAction', 'source_name source_type user')
StoryWatchAction = namedtuple('StoryWatchAction', 'source_name source_type user')
CommentAction = namedtuple('CommentAction', 'source_name source_type user comment')
DirectMessageAction = namedtuple('DirectMessageAction', 'user message')
DirectMessageBackdateAction = namedtuple('DirectMessageBackdateAction', 'user message')
UnfollowAction = namedtuple('UnfollowAction', 'user')
ScrapeAction = namedtuple('ScrapeAction', 'source_name source_type user')
FilterAction = namedtuple('FilterAction', 'user')
InteractAction = namedtuple('InteractAction', 'source_name source_type user succeed')
RemoveMassFollowerAction = namedtuple('RemoveMassFollowerAction', 'user')
StartSessionAction = namedtuple('StartSessionAction', '')
@unique
class SourceType(Enum):
BLOGGER = "blogger"
HASHTAG = "hashtag"
PLACE = "place"
@unique
class BloggerInteractionType(Enum):
FOLLOWERS = 'followers'
FOLLOWING = 'following'
@unique
class HashtagInteractionType(Enum):
TOP_LIKERS = 'top-likers'
RECENT_LIKERS = 'recent-likers'
RECENT_POSTS = 'recent-posts'
TOP_POSTS = 'top-posts'
@unique
class PlaceInteractionType(Enum):
TOP_LIKERS = 'top-likers'
RECENT_LIKERS = 'recent-likers'
RECENT_POSTS = 'recent-posts'
TOP_POSTS = 'top-posts'
@unique
class TargetType(Enum):
URL = 'url'
USERNAME = 'username'
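# Illustrative construction (an addition, not part of the original module) of
# a couple of the records defined above; the user and source names are invented.
if __name__ == '__main__':
    like = LikeAction(source_name='nature', source_type=SourceType.HASHTAG.value, user='some_user')
    print(like.source_name, like.source_type, like.user)
    print(BloggerInteractionType.FOLLOWERS.value)  # 'followers'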
|
dc0dd5789f33143ae5f41a01456a23545054c6ac
|
c5378ac854725eff8e3fcda1f0ce4a36edf660ce
|
/tests/test_corpus.py
|
215693fe4b1f1c8602c29eb2a8371455f08f153e
|
[
"MIT"
] |
permissive
|
jakelever/kindred
|
e9f49a7f272ea3a9f9fdf5f141eaa9b97e4c7b0f
|
b6eac60fa40086b4c44e98e0baa34b760310d284
|
refs/heads/master
| 2023-03-16T21:09:34.777474
| 2023-03-12T00:12:05
| 2023-03-12T00:12:05
| 88,295,711
| 158
| 39
|
MIT
| 2022-09-28T12:13:39
| 2017-04-14T19:14:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,765
|
py
|
test_corpus.py
|
import kindred
from collections import Counter
def test_corpus_split():
mainCorpus = kindred.Corpus()
for i in range(100):
doc = kindred.Document(text=str(i))
mainCorpus.addDocument(doc)
corpusA,corpusB = mainCorpus.split(0.75)
assert len(corpusA.documents) == 75
assert len(corpusB.documents) == 25
seen = set()
for doc in corpusA.documents:
assert doc in mainCorpus.documents, "This document doesn't match an existing one"
assert not doc in seen, "This document isn't unique now"
seen.add(doc)
for doc in corpusB.documents:
assert doc in mainCorpus.documents, "This document doesn't match an existing one"
assert not doc in seen, "This document isn't unique now"
seen.add(doc)
assert len(seen) == len(mainCorpus.documents)
def test_corpus_nfold_split():
mainCorpus = kindred.Corpus()
docCount = 100
for i in range(docCount):
doc = kindred.Document(text=str(i))
mainCorpus.addDocument(doc)
	folds = 5
	trainCounter,testCounter = Counter(),Counter()
	for trainCorpus,testCorpus in mainCorpus.nfold_split(folds):
		assert len(trainCorpus.documents) == (folds-1) * docCount / folds
		assert len(testCorpus.documents) == docCount / folds
		seen = set()
		for doc in trainCorpus.documents:
			assert doc in mainCorpus.documents, "This document doesn't match an existing one"
			assert not doc in seen, "This document isn't unique now"
			seen.add(doc)
			trainCounter[doc] += 1
		for doc in testCorpus.documents:
			assert doc in mainCorpus.documents, "This document doesn't match an existing one"
			assert not doc in seen, "This document isn't unique now"
			seen.add(doc)
			testCounter[doc] += 1
	# across all folds each document appears folds-1 times in train and once in test
	for doc,count in trainCounter.items():
		assert count == folds - 1
	for doc,count in testCounter.items():
		assert count == 1
def test_corpus_splitIntoSentences():
text = "<drug id='1'>Erlotinib</drug> is an <gene id='2'>EGFR</gene> inhibitor. <drug id='3'>Gefitinib</drug> is another drug. <relation type='inhibits' drug='1' gene='2' />"
corpus = kindred.Corpus(text,loadFromSimpleTag=True)
parser = kindred.Parser()
parser.parse(corpus)
sentenceCorpus = corpus.splitIntoSentences()
assert sentenceCorpus.parsed == True
assert isinstance(sentenceCorpus,kindred.Corpus)
assert len(sentenceCorpus.documents) == 2
expected1 = "<Document Erlotinib is an EGFR inhibitor. [<Entity drug:'Erlotinib' sourceid=1 [(0, 9)]>, <Entity gene:'EGFR' sourceid=2 [(16, 20)]>] [<Relation inhibits [<Entity drug:'Erlotinib' sourceid=1 [(0, 9)]>, <Entity gene:'EGFR' sourceid=2 [(16, 20)]>] ['drug', 'gene']>]>"
expected2 = "<Document Gefitinib is another drug. [<Entity drug:'Gefitinib' sourceid=3 [(0, 9)]>] []>"
assert str(sentenceCorpus.documents[0]) == expected1
assert str(sentenceCorpus.documents[1]) == expected2
doc0 = sentenceCorpus.documents[0]
assert len(doc0.sentences) == 1
sentence0 = doc0.sentences[0]
expectedTokens0 = "('Erlotinib', 0, 9),('is', 10, 12),('an', 13, 15),('EGFR', 16, 20),('inhibitor', 21, 30),('.', 30, 31)"
assert ",".join(str((t.word,t.startPos,t.endPos)) for t in sentence0.tokens).replace("u'","'") == expectedTokens0
assert len(sentence0.dependencies) == 6
assert str(sentence0.entityAnnotations) == "[(<Entity drug:'Erlotinib' sourceid=1 [(0, 9)]>, [0]), (<Entity gene:'EGFR' sourceid=2 [(16, 20)]>, [3])]"
doc1 = sentenceCorpus.documents[1]
assert len(doc1.sentences) == 1
sentence1 = doc1.sentences[0]
expectedTokens1 = "('Gefitinib', 0, 9),('is', 10, 12),('another', 13, 20),('drug', 21, 25),('.', 25, 26)"
assert ",".join(str((t.word,t.startPos,t.endPos)) for t in sentence1.tokens).replace("u'","'") == expectedTokens1
assert len(sentence1.dependencies) == 5
assert str(sentence1.entityAnnotations) == "[(<Entity drug:'Gefitinib' sourceid=3 [(0, 9)]>, [0])]"
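# A minimal sketch (not one of the original tests) of the split API the first
# test exercises: an 80/20 split preserves document identity and sizes.
def test_corpus_split_sketch():
	corpus = kindred.Corpus()
	for i in range(10):
		corpus.addDocument(kindred.Document(text=str(i)))
	train,test = corpus.split(0.8)
	assert len(train.documents) == 8
	assert len(test.documents) == 2
	assert all(doc in corpus.documents for doc in train.documents + test.documents)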
|
5cd57ed43bd2c0831f47b4c20a3517552a244dff
|
420e7db695f82c7cf9d29735df956fa86bc0f14f
|
/actions/fragment.py
|
d942e57272a8789dda8771db8f37f55b0e612e17
|
[
"BSD-3-Clause"
] |
permissive
|
Kkevsterrr/geneva
|
bf929e3056dc6215bca079f1fd587866907a1cd5
|
6b091060ed0946b98a2ff9196dfbf93d85cbb28a
|
refs/heads/master
| 2023-08-23T22:30:49.750259
| 2023-05-18T21:24:14
| 2023-05-18T21:24:14
| 221,001,148
| 1,771
| 168
|
BSD-3-Clause
| 2023-05-26T10:04:58
| 2019-11-11T14:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,022
|
py
|
fragment.py
|
import random
from actions.action import Action
import layers.packet
from scapy.all import IP, TCP, fragment
MAX_UINT = 4294967295
class FragmentAction(Action):
"""
Defines the FragmentAction for Geneva - fragments or segments the given packet.
"""
frequency = 2
def __init__(self, environment_id=None, correct_order=None, fragsize=-1, segment=True, overlap=0):
"""
Initializes a fragment action object.
Args:
environment_id (str, optional): Environment ID of the strategy this object is a part of
correct_order (bool, optional): Whether or not the fragments/segments should be returned in the correct order
fragsize (int, optional): The index this packet should be cut. Defaults to -1, which cuts it in half.
segment (bool, optional): Whether we should perform fragmentation or segmentation
overlap (int, optional): How many bytes the fragments/segments should overlap
"""
Action.__init__(self, "fragment", "out")
self.enabled = True
self.branching = True
self.terminal = False
self.fragsize = fragsize
self.segment = segment
self.overlap = overlap
        if correct_order is None:
self.correct_order = self.get_rand_order()
else:
self.correct_order = correct_order
def get_rand_order(self):
"""
Randomly decides if the fragments should be reversed.
"""
return random.choice([True, False])
def fragment(self, original, fragsize):
"""
Fragments a packet into two, given the size of the first packet (0:fragsize)
Always returns two packets
"""
if fragsize == 0:
frags = [original]
else:
frags = fragment(original, fragsize=fragsize)
# If there were more than 2 fragments, join the loads so we still have 2 packets
if len(frags) > 2:
for frag in frags[2:]:
frags[1]["IP"].load += frag["IP"].load
# After scapy fragmentation, the flags field is set to "MF+DF"
# In order for the packet to remain valid, strip out the "MF"
frags[1]["IP"].flags = "DF"
# If scapy tried to fragment but there were only enough bytes for 1 packet, just duplicate it
elif len(frags) == 1:
frags.append(frags[0].copy())
return frags[0], frags[1]
def ip_fragment(self, packet, logger):
"""
Perform IP fragmentation.
"""
if not packet.haslayer("IP") or not hasattr(packet["IP"], "load"):
            return packet, packet.copy()  # duplicate if no IP layer or no payload to fragment
load = ""
if packet.haslayer("TCP"):
load = bytes(packet["TCP"])
elif packet.haslayer("UDP"):
load = bytes(packet["UDP"])
else:
load = bytes(packet["IP"].load)
# If there is no load, duplicate the packet
if not load:
return packet, packet.copy()
if self.fragsize == -1 or (self.fragsize * 8) > len(load) or len(load) <= 8:
            fragsize = (len(load) // 2) // 8 * 8  # largest multiple of 8 no bigger than half the payload
frags = self.fragment(packet.copy().packet, fragsize=fragsize)
else:
# packet can be fragmented as requested
frags = self.fragment(packet.copy().packet, fragsize=self.fragsize*8)
packet1 = layers.packet.Packet(frags[0])
packet2 = layers.packet.Packet(frags[1])
if self.correct_order:
return packet1, packet2
else:
return packet2, packet1
def tcp_segment(self, packet, logger):
"""
Segments a packet into two, given the size of the first packet (0:fragsize)
Always returns two packets, since fragment is a branching action, so if we
are unable to segment, it will duplicate the packet.
If overlap is specified, it will select n bytes from the second packet
and append them to the first, and increment the sequence number accordingly
"""
if not packet.haslayer("TCP") or not hasattr(packet["TCP"], "load") or not packet["TCP"].load:
return packet, packet.copy() # duplicate if no TCP or no payload to segment
# Get the original payload and delete it from the packet so it
# doesn't come along when copying the TCP layer
payload = packet["TCP"].load
del(packet["TCP"].load)
fragsize = self.fragsize
if self.fragsize == -1 or self.fragsize > len(payload) - 1:
fragsize = int(len(payload)/2)
# Craft new packets
# Make sure we don't go out of bounds by choosing the min
overlap_bytes = min(len(payload[fragsize:]), self.overlap)
# Attach these bytes to the first packet
pkt1 = IP(packet["IP"])/payload[:fragsize + overlap_bytes]
pkt2 = IP(packet["IP"])/payload[fragsize:]
# We cannot rely on scapy's native parsing here - if a previous action has changed the
# fragment offset, scapy will not identify this as TCP, so we must do it for scapy
if not pkt1.haslayer("TCP"):
pkt1 = IP(packet["IP"])/TCP(bytes(pkt1["IP"].load))
if not pkt2.haslayer("TCP"):
pkt2 = IP(packet["IP"])/TCP(bytes(pkt2["IP"].load))
packet1 = layers.packet.Packet(pkt1)
packet2 = layers.packet.Packet(pkt2)
        # Adjust packet2's sequence number to account for the bytes moved into packet1
if packet2["TCP"].seq + fragsize > MAX_UINT:
# Wrap sequence numbers around if greater than MAX_UINT
packet2["TCP"].seq = packet2["TCP"].seq + fragsize - MAX_UINT - 1
else:
packet2["TCP"].seq += fragsize
del packet1["IP"].chksum
del packet2["IP"].chksum
del packet1["IP"].len
del packet2["IP"].len
del packet1["TCP"].chksum
del packet2["TCP"].chksum
del packet1["TCP"].dataofs
del packet2["TCP"].dataofs
if self.correct_order:
return [packet1, packet2]
else:
return [packet2, packet1]
def run(self, packet, logger):
"""
The fragment action fragments each given packet.
"""
logger.debug(" - Fragmenting given packet %s" % str(packet))
if self.segment:
return self.tcp_segment(packet, logger)
else:
return self.ip_fragment(packet, logger)
def __str__(self):
"""
Returns a string representation with the fragsize
"""
s = Action.__str__(self)
if not self.overlap:
ending = "}"
else:
ending = ":" + str(self.overlap) + "}"
if self.segment:
s += "{" + "tcp" + ":" + str(self.fragsize) + ":" + str(self.correct_order) + ending
else:
s += "{" + "ip" + ":"+ str(self.fragsize) + ":" + str(self.correct_order) + ending
return s
def parse(self, string, logger):
"""
Parses a string representation of fragmentation. Nothing particularly special,
        but it does check for the fragsize.
Note that the given logger is a DIFFERENT logger than the logger passed
to the other functions, and they cannot be used interchangeably. This logger
is attached to the main GA driver, and is run outside the evaluator. When the
action is actually run, it's run within the evaluator, which by necessity must
pass in a different logger.
"""
# Count the number of params in this given string
num_parameters = string.count(":")
        # Valid fragment actions have either 2 or 3 parameters
if num_parameters == 2:
params = string.split(":")
seg, fragsize, correct_order = params
overlap = 0
if "tcp" in seg:
self.segment = True
else:
self.segment = False
elif num_parameters == 3:
params = string.split(":")
seg, fragsize, correct_order, overlap = params
if overlap.endswith("}"):
overlap = overlap[:-1] # Chop off trailing }
if "tcp" in seg:
self.segment = True
else:
self.segment = False
else:
msg = "Cannot parse fragment action %s" % string
logger.error(msg)
raise Exception(msg)
try:
# Try to convert to int
self.fragsize = int(fragsize)
self.overlap = int(overlap)
except ValueError as e:
print(e)
msg = "Cannot parse fragment action %s" % string
logger.error(msg)
raise Exception(msg)
# Parse ordering
if correct_order.startswith('True'):
self.correct_order = True
else:
self.correct_order = False
return True
def mutate(self, environment_id=None):
"""
Mutates the fragment action - it either chooses a new segment offset,
switches the packet order, and/or changes whether it segments or fragments.
"""
self.correct_order = self.get_rand_order()
self.segment = random.choice([True, True, True, False])
if self.segment:
if random.random() < 0.5:
self.fragsize = int(random.uniform(1, 60))
else:
self.fragsize = -1
else:
if random.random() < 0.2:
self.fragsize = int(random.uniform(1, 50))
else:
self.fragsize = -1
if random.random() < .5:
# Somewhat aggressively overlap
if random.random() < .5:
if self.fragsize == -1:
self.overlap = 5
else:
self.overlap = int(self.fragsize/2)
else:
self.overlap = int(random.uniform(1, 50))
return self
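# A tiny standalone sketch (an addition, not part of the original module) of
# the sequence-number wraparound handled in tcp_segment above: TCP sequence
# numbers live in a 32-bit space, so advancing past MAX_UINT wraps back to 0.
def _advance_seq_sketch(seq, fragsize):
    if seq + fragsize > MAX_UINT:
        return seq + fragsize - MAX_UINT - 1
    return seq + fragsize

assert _advance_seq_sketch(100, 50) == 150
assert _advance_seq_sketch(MAX_UINT, 1) == 0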
|
77a1f52811a0a047ad734afed4539d23bf2895f1
|
dc9ae9fe74432013ad61d6f19d8c825d37de6bf6
|
/tailer/__init__.py
|
3c9afdc21cb1d10d5764bedd390ff8b07c0473cc
|
[
"MIT"
] |
permissive
|
six8/pytailer
|
5438ad1d002f54b44d4b00701a669e3fefb4e53c
|
a9e49714b5fd2f84b05ea2bc9de2f9a9fbad013a
|
refs/heads/master
| 2023-06-08T17:31:23.468727
| 2023-06-01T15:37:21
| 2023-06-01T15:37:21
| 3,564,015
| 114
| 41
|
MIT
| 2023-09-06T18:27:05
| 2012-02-27T19:50:23
|
Python
|
UTF-8
|
Python
| false
| false
| 8,700
|
py
|
__init__.py
|
import re
import sys
import time
class Tailer(object):
"""\
Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ("\r\n", "\n", "\r")
def __init__(self, file, read_size=1024, end=False):
self.read_size = read_size
self.file = file
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split("|".join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def tell(self):
return self.file.tell()
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return len(read_str), read_str
def seek_line_forward(self):
"""\
Searches forward from the current file position for a line terminator
        and seeks to the character after it.
"""
pos = start_pos = self.file.tell()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
            # The first character is a line terminator, don't count this one
start += 1
while bytes_read > 0:
# Scan forwards, counting the newlines in this bufferfull
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def seek_line(self):
"""\
Searches backwards from the current file position for a line terminator
        and seeks to the character after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
            # The last character is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == "\r\n" and "\r\n" in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
                # Not enough lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def tail(self, lines=10):
"""\
Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in range(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def head(self, lines=10):
"""\
Return the top lines of the file.
"""
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return []
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
trailing = True
while 1:
where = self.file.tell()
line = self.file.readline()
if line:
if trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
trailing = False
continue
                if line[-1] in self.line_terminators:
                    line = line[:-1]
                    if line[-1:] == "\r" and "\r\n" in self.line_terminators:
                        # the "\n" of a CRLF pair was just stripped; drop the "\r" too
                        line = line[:-1]
trailing = False
yield line
else:
trailing = True
self.seek(where)
time.sleep(delay)
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
def tail(file, lines=10):
"""\
Return the last lines of the file.
>>> from io import StringIO
>>> f = StringIO()
>>> for i in range(11):
... _ = f.write('Line %d\\n' % (i + 1))
>>> tail(f, 3)
['Line 9', 'Line 10', 'Line 11']
"""
return Tailer(file).tail(lines)
def head(file, lines=10):
"""\
Return the top lines of the file.
>>> from io import StringIO
>>> f = StringIO()
>>> for i in range(11):
... _ = f.write('Line %d\\n' % (i + 1))
>>> head(f, 3)
['Line 1', 'Line 2', 'Line 3']
"""
return Tailer(file).head(lines)
def follow(file, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
>>> import os
>>> f = open('test_follow.txt', 'w')
>>> fo = open('test_follow.txt', 'r')
>>> generator = follow(fo)
>>> _ = f.write('Line 1\\n')
>>> f.flush()
>>> next(generator)
'Line 1'
>>> _ = f.write('Line 2\\n')
>>> f.flush()
>>> next(generator)
'Line 2'
>>> f.close()
>>> fo.close()
>>> os.remove('test_follow.txt')
"""
return Tailer(file, end=True).follow(delay)
def _test():
import doctest
doctest.testmod()
def _main(filepath, options):
tailer = Tailer(open(filepath, "rb"))
try:
try:
if options.lines > 0:
if options.head:
if options.follow:
sys.stderr.write("Cannot follow from top of file.\n")
sys.exit(1)
lines = tailer.head(options.lines)
else:
lines = tailer.tail(options.lines)
for line in lines:
print(line)
elif options.follow:
# Seek to the end so we can follow
tailer.seek_end()
if options.follow:
for line in tailer.follow(delay=options.sleep):
print(line)
except KeyboardInterrupt:
# Escape silently
pass
finally:
tailer.close()
def main():
from optparse import OptionParser
import sys
parser = OptionParser(usage="usage: %prog [options] filename")
parser.add_option(
"-f",
"--follow",
dest="follow",
default=False,
action="store_true",
help="output appended data as the file grows",
)
parser.add_option(
"-n",
"--lines",
dest="lines",
default=10,
type="int",
help="output the last N lines, instead of the last 10",
)
parser.add_option(
"-t",
"--top",
dest="head",
default=False,
action="store_true",
help="output lines from the top instead of the bottom. Does not work with follow",
)
parser.add_option(
"-s",
"--sleep-interval",
dest="sleep",
default=1.0,
metavar="S",
type="float",
help="with -f, sleep for approximately S seconds between iterations",
)
parser.add_option(
"",
"--test",
dest="test",
default=False,
action="store_true",
help="Run some basic tests",
)
(options, args) = parser.parse_args()
if options.test:
_test()
elif not len(args) == 1:
parser.print_help()
sys.exit(1)
else:
_main(args[0], options)
if __name__ == "__main__":
main()
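# Illustrative direct use of the Tailer class (an addition; the module-level
# helpers above wrap exactly this), in the same doctest style as the helpers:
#
# >>> from io import StringIO
# >>> f = StringIO("a\nb\nc\nd\n")
# >>> Tailer(f).tail(2)
# ['c', 'd']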
|
1b62d1bcd4fbd655a1303052a3851d9adc87d197
|
e0ed4496e94263643cedea56bfcdec1140ced8d6
|
/neupy/plots/hinton.py
|
829fdd8e8ab858ff6f032b50598e5e2032abfd4d
|
[
"MIT"
] |
permissive
|
itdxer/neupy
|
6307666271807bd9028e3e60dd2536a544ed8421
|
317ed4204b5239e8be2b94a95fe3157c5f9edc65
|
refs/heads/master
| 2023-06-13T23:09:36.487633
| 2023-01-03T21:24:56
| 2023-01-03T21:24:56
| 41,323,480
| 840
| 206
|
MIT
| 2022-12-16T16:32:10
| 2015-08-24T19:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
hinton.py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from neupy.utils import format_data
__all__ = ('hinton',)
def hinton(matrix, max_weight=None, ax=None, add_legend=True):
"""
Draw Hinton diagram for visualizing a weight matrix.
Parameters
----------
matrix: array-like
Matrix that you want to visualize using Hinton diagram.
    max_weight : float
        Maximum value of the matrix. If ``None``, the value is
        calculated from the maximum absolute value of the matrix.
        Defaults to ``None``.
    ax : object
        Matplotlib Axes instance. If ``None``, the function
        generates a new Axes instance. Defaults to ``None``.
    add_legend : bool
        Whether to add a legend describing the white (positive)
        and black (negative) rectangles. Defaults to ``True``.
Returns
-------
object
Matplotlib Axes instance.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from neupy import plots
>>>
>>> weight = np.random.randn(20, 20)
>>>
>>> plt.style.use('ggplot')
>>> plt.title("Hinton diagram")
>>> plt.figure(figsize=(16, 12))
>>>
>>> plots.hinton(weight)
>>> plt.show()
References
----------
[1] http://matplotlib.org/examples/specialty_plots/hinton_demo.html
"""
if ax is None:
ax = plt.gca()
matrix = format_data(matrix, is_feature1d=True)
if max_weight is None:
max_value = np.abs(matrix).max()
max_value_log2_base = np.log(max_value) / np.log(2)
max_weight = 2 ** np.ceil(max_value_log2_base)
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (y, x), weight in np.ndenumerate(matrix):
color = ('white' if weight > 0 else 'black')
size = min(np.sqrt(np.abs(weight / max_weight)), 1.)
rect = plt.Rectangle([x - size / 2., y - size / 2.], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
if add_legend:
max_value = matrix.max().round(2)
min_value = matrix.min().round(2)
white = Rectangle(xy=(0, 0), width=1., height=1., linewidth=1.,
linestyle='solid', facecolor='#ffffff')
black = Rectangle(xy=(0, 0), width=1., height=1., color='#000000')
if min_value < 0 and max_value > 0:
rectangles = [white, black]
rect_description = [
'Positive value\n'
'Max: {}'.format(max_value),
'Negative value\n'
'Min: {}'.format(min_value),
]
elif min_value >= 0:
rectangles = [white]
rect_description = [
'Positive value\n'
'Min: {}\n'
'Max: {}'.format(min_value, max_value),
]
else:
rectangles = [black]
rect_description = [
'Negative value\n'
'Min: {}\n'
'Max: {}'.format(min_value, max_value),
]
ax.legend(rectangles, rect_description, loc='center left',
bbox_to_anchor=(1., 0.5))
return ax
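# A small sketch (an addition, not from the original module) of how the
# default ``max_weight`` above is derived: the largest absolute entry is
# rounded up to the next power of two, which keeps every box size in [0, 1].
if __name__ == '__main__':
    m = np.array([[0.3, -1.7], [2.9, -0.2]])
    max_value = np.abs(m).max()  # 2.9
    print(2 ** np.ceil(np.log(max_value) / np.log(2)))  # 4.0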
|
c3f0eea7a65115a232c769a40ea32c09fbfd87fc
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/PfemApplication/python_scripts/MainPfem.py
|
894d643b518da96aca7efade5e390aca72944a2e
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 8,431
|
py
|
MainPfem.py
|
# Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.SolidMechanicsApplication
import KratosMultiphysics.PfemApplication
import MainSolid
class PfemSolution(MainSolid.Solution):
def __init__(self, Model, file_parameters = "ProjectParameters.json", file_name = None):
super(PfemSolution, self).__init__(Model, file_parameters, file_name)
#### Main internal methods ####
def _get_processes_parameters(self):
# add fluid processes
add_fluid_process = True
if self.ProjectParameters.Has("problem_data"):
if self.ProjectParameters["problem_data"].Has("domain_type"):
if(self.ProjectParameters["problem_data"]["domain_type"].GetString() != "Solid"):
add_fluid_process = False
if add_fluid_process is True:
return self._add_fluid_processes()
else:
return MainSolid.Solution._get_processes_parameters(self)
def _add_fluid_processes(self):
# get processes parameters from base class
processes_parameters = MainSolid.Solution._get_processes_parameters(self)
# add a process that manages the assignment of material properties to particles
# modify processes_parameters to introduce this process in the problem_process_list
# particle concept: assign the initial material percentage and a properties vector pointer to the nodes
if(processes_parameters.Has("problem_process_list")):
problem_processes = processes_parameters["problem_process_list"]
#print(" PROBLEM_PROCESSES ", processes_parameters["problem_process_list"].PrettyPrintJsonString())
extended_problem_processes = self._set_particle_properties_process(problem_processes)
processes_parameters.AddValue("problem_process_list", extended_problem_processes)
#extended_problem_processes = self._set_volume_recovery_process(problem_processes)
#processes_parameters.AddValue("problem_process_list", extended_problem_processes)
#print(" EXTENDED_PROBLEM_PROCESSES ", processes_parameters["problem_process_list"].PrettyPrintJsonString())
if(processes_parameters.Has("constraints_process_list")):
constraints_processes = processes_parameters["constraints_process_list"]
if(self.echo_level>1):
print(" CONSTRAINTS_PROCESSES ", processes_parameters["constraints_process_list"].PrettyPrintJsonString())
extended_constraints_processes = self._set_isolated_nodes_management_process(constraints_processes)
processes_parameters.AddValue("constraints_process_list", extended_constraints_processes)
extended_constraints_processes = self._set_selected_elements_management_process(constraints_processes)
processes_parameters.AddValue("constraints_process_list", extended_constraints_processes)
if(self.echo_level>1):
print(" EXTENDED_CONSTRAINTS_PROCESSES ", processes_parameters["constraints_process_list"].PrettyPrintJsonString())
if(processes_parameters.Has("loads_process_list")):
loads_processes = processes_parameters["loads_process_list"]
if(self.echo_level>1):
print(" LOADS_PROCESSES ", processes_parameters["loads_process_list"].PrettyPrintJsonString())
extended_loads_processes = self._set_volume_acceleration_process(loads_processes)
processes_parameters.AddValue("loads_process_list", extended_loads_processes)
if(self.echo_level>1):
print(" EXTENDED_LOADS_PROCESSES ", processes_parameters["loads_process_list"].PrettyPrintJsonString())
return processes_parameters
def _set_isolated_nodes_management_process(self, constraints_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "manage_isolated_nodes_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
constraints_processes.Append(default_settings)
return constraints_processes
def _set_selected_elements_management_process(self, constraints_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "manage_selected_elements_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
constraints_processes.Append(default_settings)
return constraints_processes
def _set_volume_acceleration_process(self, loads_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "assign_modulus_and_direction_to_nodes_process",
"kratos_module" : "KratosMultiphysics.SolidMechanicsApplication",
"Parameters" : {
"variable_name" : "VOLUME_ACCELERATION",
"modulus" : 9.81,
"direction" : [0.0,-1.0,0.0]
}
}
""")
if(self.ProjectParameters.Has("problem_data")):
if(self.ProjectParameters["problem_data"].Has("gravity_vector")):
import math
#get normalized direction
direction = []
scalar_prod = 0
for i in range(self.ProjectParameters["problem_data"]["gravity_vector"].size()):
direction.append( self.ProjectParameters["problem_data"]["gravity_vector"][i].GetDouble() )
scalar_prod = scalar_prod + direction[i]*direction[i]
norm = math.sqrt(scalar_prod)
self.value = []
if( norm != 0.0 ):
for j in direction:
self.value.append( j/norm )
else:
for j in direction:
self.value.append(0.0)
if(default_settings["Parameters"].Has("modulus")):
default_settings["Parameters"]["modulus"].SetDouble(norm)
if(default_settings["Parameters"].Has("direction")):
counter = 0
for i in self.value:
default_settings["Parameters"]["direction"][counter].SetDouble(i)
counter+=1
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
loads_processes.Append(default_settings)
return loads_processes
def _set_particle_properties_process(self, problem_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "assign_properties_to_nodes_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {
"fluid_mixture" : true,
"solid_mixture" : false
}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
problem_processes.Append(default_settings)
return problem_processes
def _set_volume_recovery_process(self, problem_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "volume_recovery_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {
}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
problem_processes.Append(default_settings)
return problem_processes
@classmethod
def _class_prefix(cls):
header = "::[--PFEM Simulation--]::"
return header
if __name__ == "__main__":
PfemSolution(KratosMultiphysics.Model()).Run()
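# Standalone sketch of the gravity-vector normalization performed in
# _set_volume_acceleration_process above (hypothetical vector, not part of
# the original script): the vector is split into a modulus and a unit direction.
#
#     >>> import math
#     >>> gravity = [0.0, -9.81, 0.0]
#     >>> norm = math.sqrt(sum(g * g for g in gravity))   # modulus: 9.81
#     >>> [g / norm for g in gravity] if norm != 0.0 else [0.0] * 3
#     [0.0, -1.0, 0.0]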
|
719993b0656fc08df2738d79ba89150612953347
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/migrations/0209_user_profile_no_empty_password.py
|
67276d2e9e5c1736ed2fad7064926ccd021d94a0
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 11,109
|
py
|
0209_user_profile_no_empty_password.py
|
# Generated by Django 1.11.24 on 2019-10-16 22:48
from typing import Any, Set, Union
import orjson
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.cache import cache_delete, user_profile_by_api_key_cache_key
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
def ensure_no_empty_passwords(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
# Because we're backporting this migration to the Zulip 2.0.x
# series, we've given it migration number 0209, which is a
# duplicate with an existing migration already merged into Zulip
# main. Migration 0247_realmauditlog_event_type_to_int.py
# changes the format of RealmAuditLog.event_type, so we need the
# following conditional block to determine what values to use when
# searching for the relevant events in that log.
event_type_class = RealmAuditLog._meta.get_field("event_type").get_internal_type()
if event_type_class == "CharField":
USER_PASSWORD_CHANGED: Union[int, str] = "user_password_changed"
USER_API_KEY_CHANGED: Union[int, str] = "user_api_key_changed"
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
# First, we do some bulk queries to collect data we'll find useful
# in the loop over all users below.
# Users who changed their password at any time since account
# creation. These users could theoretically have started with an
# empty password, but set a password later via the password reset
# flow. If their API key has changed since they changed their
# password, we can prove their current API key cannot have been
# exposed; we store those users in
# password_change_user_ids_no_reset_needed.
password_change_user_ids = set(
RealmAuditLog.objects.filter(event_type=USER_PASSWORD_CHANGED).values_list(
"modified_user_id", flat=True
)
)
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
# Here, we check the timing for users who have changed
# their password.
# We check if the user changed their API key since their first password change.
query = RealmAuditLog.objects.filter(
modified_user=user_id,
event_type__in=[USER_PASSWORD_CHANGED, USER_API_KEY_CHANGED],
).order_by("event_time")
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
# Since these users are in password_change_user_ids, this must not be None.
assert earliest_password_change is not None
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if latest_api_key_change is None:
# This user has never changed their API key. As a
# result, even though it's very likely this user never
# had an empty password, they have changed their
# password, and we have no record of the password's
# original hash, so we can't prove the user's API key
# was never affected. We schedule this user's API key
# to be reset.
password_change_user_ids_api_key_reset_needed.add(user_id)
elif earliest_password_change.event_time <= latest_api_key_change.event_time:
# This user has changed their password before
# generating their current personal API key, so we can
# prove their current personal API key could not have
# been exposed by this bug.
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
# We record in this log file users whose current API key was
# generated after a real password was set, so there's no need
# to reset their API key, but because they've changed their
# password, we don't know whether or not they originally had a
# buggy password.
#
# In theory, this list can be recalculated using the above
# algorithm modified to only look at events before the time
# this migration was installed, but it's helpful to log it as well.
with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
line = "No reset needed, but changed password: {}\n"
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = "empty_password"
AFFECTED_USER_TYPE_CHANGED_PASSWORD = "changed_password"
MIGRATION_ID = "0209_user_profile_no_empty_password"
def write_realm_audit_log_entry(
user_profile: Any, event_time: Any, event_type: Any, affected_user_type: str
) -> None:
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=event_type,
event_time=event_time,
extra_data=orjson.dumps(
{
"migration_id": MIGRATION_ID,
"affected_user_type": affected_user_type,
}
).decode(),
)
# If Zulip's built-in password authentication is not enabled on
# the server level, then we plan to skip resetting any users' API
# keys, since the bug requires EmailAuthBackend.
email_auth_enabled = "zproject.backends.EmailAuthBackend" in settings.AUTHENTICATION_BACKENDS
# A quick note: This query could in theory exclude users with
# is_active=False, is_bot=True, or realm__deactivated=True here to
# access only active human users in non-deactivated realms.
# But it's better to just be thorough; users can be reactivated,
# and e.g. a server admin could manually edit the database to
# change a bot into a human user if they really wanted to. And
# there's essentially no harm in rewriting state for a deactivated
# account.
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password("", user_profile.password):
# This user currently has the empty string as their password.
# Change their password and record that we did so.
user_profile.password = make_password(None)
update_fields = ["password"]
write_realm_audit_log_entry(
user_profile, event_time, USER_PASSWORD_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD
)
if email_auth_enabled and not user_profile.is_bot:
# As explained above, if the built-in password authentication
# is enabled, reset the API keys. We can skip bot accounts here,
# because the `password` attribute on a bot user is useless.
reset_user_api_key(user_profile)
update_fields.append("api_key")
event_time = timezone_now()
write_realm_audit_log_entry(
user_profile,
event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD,
)
user_profile.save(update_fields=update_fields)
continue
elif (
email_auth_enabled and user_profile.id in password_change_user_ids_api_key_reset_needed
):
# For these users, we just need to reset the API key.
reset_user_api_key(user_profile)
user_profile.save(update_fields=["api_key"])
write_realm_audit_log_entry(
user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_CHANGED_PASSWORD
)
def reset_user_api_key(user_profile: Any) -> None:
old_api_key = user_profile.api_key
user_profile.api_key = generate_api_key()
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
# Like with any API key change, we need to clear any server-side
# state for sending push notifications to mobile app clients that
# could have been registered with the old API key. Fortunately,
# we can just write to the queue processor that handles sending
# those notices to the push notifications bouncer service.
event = {"type": "clear_push_device_tokens", "user_profile_id": user_profile.id}
queue_json_publish("deferred_work", event)
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0208_add_realm_night_logo_fields"),
]
operations = [
migrations.RunPython(
ensure_no_empty_passwords, reverse_code=migrations.RunPython.noop, elidable=True
),
]
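# A standalone sketch of the "proof" rule described above (hypothetical
# inputs, not part of the migration): a personal API key must be reset
# unless it was generated at or after the user's earliest password change.
#
# def api_key_reset_needed(earliest_password_change_time, latest_api_key_change_time):
#     if latest_api_key_change_time is None:
#         return True  # the key may predate the first real password
#     return earliest_password_change_time > latest_api_key_change_time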
|
aaaaa9b02b499a1c01a56a8d0aae34ceed22360f
|
6c00499dfe1501294ac56b0d1607fb942aafc2ee
|
/eventregistry/QueryArticle.py
|
c090cc313984a545ff3e838df8ce9881a0955860
|
[
"MIT"
] |
permissive
|
EventRegistry/event-registry-python
|
dd692729cb5c505e421d4b771804e712e5b6442b
|
bf3ce144fa61cc195840591bae5ca88b31ca9139
|
refs/heads/master
| 2023-07-06T11:04:41.033864
| 2023-06-23T08:40:31
| 2023-06-23T08:40:31
| 40,995,963
| 176
| 48
|
MIT
| 2020-10-21T09:17:06
| 2015-08-18T20:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,557
|
py
|
QueryArticle.py
|
from eventregistry.Base import *
from eventregistry.ReturnInfo import *
from typing import List, Union
class QueryArticle(Query):
def __init__(self,
articleUriOrUriList: Union[str, List[str]],
requestedResult: Union["RequestArticle", None] = None):
"""
Class for obtaining available info for one or more articles in the Event Registry
@param articleUriOrUriList: a single article uri or a list of article uris
@param requestedResult: the information to return as the result of the query. By default return the information about the article
"""
super(QueryArticle, self).__init__()
self._setVal("articleUri", articleUriOrUriList)
self._setVal("action", "getArticle")
self.setRequestedResult(requestedResult or RequestArticleInfo())
def _getPath(self):
return "/api/v1/article"
@staticmethod
def queryByUri(articleUriOrUriList: Union[str, List[str]]):
"""
obtain information about one or more articles by providing their article uris (newsfeed ids, such as "284017606")
@param articleUriOrUriList: single article uri or a list of article uris to query
"""
q = QueryArticle([])
q.queryParams["articleUri"] = articleUriOrUriList
return q
def setRequestedResult(self, requestArticle: "RequestArticle"):
"""
Set the single result type that you would like to be returned. If some other request type was previously set, it will be overwritten.
Result types can be the classes that extend RequestArticle base class (see classes below).
"""
assert isinstance(requestArticle, RequestArticle), "QueryArticle class can only accept result requests that are of type RequestArticle"
self.resultTypeList = [requestArticle]
class RequestArticle:
def __init__(self):
self.resultType = None
def getResultType(self):
return self.resultType
class RequestArticleInfo(RequestArticle):
def __init__(self, returnInfo: ReturnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = -1))):
"""
return details about the article
@param returnInfo: what details should be included in the returned information
"""
super(RequestArticleInfo, self).__init__()
self.resultType = "info"
self.__dict__.update(returnInfo.getParams("info"))
class RequestArticleSimilarArticles(RequestArticle):
def __init__(self,
page: int = 1,
count: int = 20,
lang: Union[str, List[str]] = ["eng"],
limitPerLang: int = -1,
returnInfo: ReturnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = -1))):
"""
return a list of similar articles based on the CCA
@param page: page of the articles
@param count: number of articles to return (at most 200)
@param lang: in which language(s) should be the similar articles
@param limitPerLang: max number of articles per language to return (-1 for no limit)
@param returnInfo: what details should be included in the returned information
"""
super(RequestArticleSimilarArticles, self).__init__()
assert page >= 1, "page has to be >= 1"
assert count <= 200, "at most 200 articles can be returned per call"
self.resultType = "similarArticles"
self.similarArticlesPage = page
self.similarArticlesCount = count
self.similarArticlesLang = lang
self.similarArticlesLimitPerLang = limitPerLang
self.__dict__.update(returnInfo.getParams("similarArticles"))
class RequestArticleDuplicatedArticles(RequestArticle):
def __init__(self,
page: int = 1,
count: int = 20,
sortBy: str = "cosSim", sortByAsc: bool = False,
returnInfo: ReturnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = -1))):
"""
return a list of duplicated articles of the current article
@param page: page of the articles
@param count: number of articles to return (at most 200)
@param sortBy: how are the articles sorted. Options: id, date, cosSim, fq, socialScore, facebookShares, twitterShares
@param sortByAsc: should the results be sorted in ascending order (True) or descending (False)
@param returnInfo: what details should be included in the returned information
"""
super(RequestArticleDuplicatedArticles, self).__init__()
assert page >= 1, "page has to be >= 1"
assert count <= 200, "at most 200 articles can be returned per call"
self.resultType = "duplicatedArticles"
self.duplicatedArticlesPage = page
self.duplicatedArticlesCount = count
self.duplicatedArticlesSortBy = sortBy
self.duplicatedArticlesSortByAsc = sortByAsc
self.__dict__.update(returnInfo.getParams("duplicatedArticles"))
class RequestArticleOriginalArticle(RequestArticle):
def __init__(self,
returnInfo: ReturnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = -1))):
"""
return the article that is the original of the given article (the current article is a duplicate)
@param returnInfo: what details should be included in the returned information
"""
super(RequestArticleOriginalArticle, self).__init__()
self.resultType = "originalArticle"
self.__dict__.update(returnInfo.getParams("originalArticle"))
|
9afeff51ab3211c08f70d4ef46f4c0d7f8c3bf02
|
9875d011bf7b478421a4a5a57c6b42c24c069903
|
/trame/app/demo.py
|
9a2ab05d4ca2f6b8514438f130c1f5eeca81e159
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Kitware/trame
|
bc9a0d7d6a845050f4fb386d514bd7e9b7060a21
|
861b60718798cca2db292e65e6ad39106ba75ccd
|
refs/heads/master
| 2023-08-20T22:42:57.129511
| 2023-08-18T04:25:32
| 2023-08-18T04:25:32
| 410,108,340
| 198
| 41
|
NOASSERTION
| 2023-09-14T15:29:10
| 2021-09-24T21:38:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
demo.py
|
from trame.app import get_server
from trame.ui.vuetify import SinglePageLayout
from trame.widgets import vuetify, vtk as vtk_widgets
class Cone:
def __init__(self, server=None):
if server is None:
server = get_server()
if isinstance(server, str):
server = get_server(server)
self._server = server
self.ui()
@property
def server(self):
return self._server
@property
def ctrl(self):
return self.server.controller
@property
def state(self):
return self.server.state
def ui(self):
with SinglePageLayout(self.server) as layout:
with layout.content:
with vuetify.VContainer(fluid=True, classes="pa-0 fill-height"):
with vtk_widgets.VtkView() as view:
self.ctrl.view_reset_camera = view.reset_camera
with vtk_widgets.VtkGeometryRepresentation():
vtk_widgets.VtkAlgorithm(
vtk_class="vtkConeSource",
state=("{ resolution }",),
)
with layout.toolbar:
vuetify.VSpacer()
vuetify.VSlider(
v_model=("resolution", 6),
min=3,
max=60,
step=1,
hide_details=True,
style="max-width: 300px;",
)
with vuetify.VBtn(icon=True, click=self.ctrl.view_reset_camera):
vuetify.VIcon("mdi-crop-free")
def show_in_jupyter(server=None, **kwargs):
from trame.app.jupyter import show
cone = Cone(server)
show(cone.server, **kwargs)
def main(**kwargs):
cone = Cone()
cone.server.start(**kwargs)
if __name__ == "__main__":
main()
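# Usage sketch: run this file directly to serve the app (trame's server
# accepts standard CLI flags such as --port), or call show_in_jupyter()
# from a notebook cell to embed the view:
#
#     python demo.py --port 8080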
|
786a9cdbdde0befd53eb54d0528899df54de2bf9
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/modules/text/text_generation/plato2_en_large/model.py
|
0b322a7a29ab75d49f16e4199de152fe6b24738a
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 17,119
|
py
|
model.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def post_process_context(token_ids, reader, merge=True):
"""Post-process the context sequence."""
context = []
utt = []
for tok_id in token_ids[1:]:
if tok_id == reader.eos_id:
utt = reader.tokenizer.convert_ids_to_tokens(utt)
if merge:
utt = reader.tokenizer.merge_subword(utt)
context.append(utt)
utt = []
else:
utt.append(tok_id)
return context
def post_process_response(token_ids, reader, merge=True):
"""
Post-process the decoded sequence: truncate at the first
<eos> token and remove the <bos> and <eos> tokens.
"""
eos_pos = len(token_ids)
for i, tok_id in enumerate(token_ids):
if tok_id == reader.eos_id:
eos_pos = i
break
token_ids = token_ids[1:eos_pos]
response = reader.tokenizer.convert_ids_to_tokens(token_ids)
if merge:
response = reader.tokenizer.merge_subword(response)
return token_ids, response
def get_cross_turn_repetition(context, pred_tokens, eos_idx, is_cn=False):
"""Get cross-turn repetition."""
if len(pred_tokens) == 0:
return 1.0
if is_cn:
context = ["".join(utt) for utt in context]
pred_tokens = "".join(pred_tokens)
pred_tri_grams = set()
for i in range(len(pred_tokens) - 2):
tri_gram = tuple(pred_tokens[i:i + 3])
pred_tri_grams.add(tri_gram)
for utt in context:
for i in range(len(utt) - 2):
tri_gram = tuple(utt[i:i + 3])
if tri_gram in pred_tri_grams:
return 1.0
return 0.0
def get_in_turn_repetition(pred, is_cn=False):
"""Get in-turn repetition."""
if len(pred) == 0:
return 1.0
if isinstance(pred[0], str):
pred = [tok.lower() for tok in pred]
if is_cn:
pred = "".join(pred)
tri_grams = set()
for i in range(len(pred) - 2):
tri_gram = tuple(pred[i:i + 3])
if tri_gram in tri_grams:
return 1.0
tri_grams.add(tri_gram)
return 0.0
class Plato2EncoderLayer(nn.Layer):
def __init__(self, n_head, hidden_size, attn_dropout, act_dropout):
super(Plato2EncoderLayer, self).__init__()
self.self_attn = nn.MultiHeadAttention(hidden_size, n_head, attn_dropout)
self.pre_norm_layer = nn.LayerNorm(hidden_size)
self.post_norm_layer = nn.LayerNorm(hidden_size)
self.fc1 = nn.Linear(hidden_size, hidden_size * 4)
self.fc2 = nn.Linear(hidden_size * 4, hidden_size)
self.dropout_layer = nn.Dropout(act_dropout)
self.gelu_layer = nn.GELU()
def forward(self, x, attn_mask, cache):
query = self.pre_norm_layer(x)
attn_output, new_cache = self.self_attn(query, None, None, attn_mask, cache)
attn_output = self.dropout_layer(attn_output)
attn_output = attn_output + x
ffd_input = self.post_norm_layer(attn_output)
ffd_output = self.fc1(ffd_input)
ffd_output = self.gelu_layer(ffd_output)
ffd_output = self.dropout_layer(ffd_output)
ffd_output = self.fc2(ffd_output)
ffd_output = self.dropout_layer(ffd_output)
out = ffd_output + attn_output
return out, new_cache
def gen_cache(self, key):
return self.self_attn.gen_cache(key)
class Plato2Encoder(nn.Layer):
def __init__(self, vocab_size, type_size, max_position_seq_len, num_layers, n_head, hidden_size, attn_dropout,
act_dropout):
super(Plato2Encoder, self).__init__()
self.n_head = n_head
self.word_embedding_layer = nn.Embedding(vocab_size, hidden_size)
self.sent_embedding_layer = nn.Embedding(type_size, hidden_size)
self.pos_embedding_layer = nn.Embedding(max_position_seq_len, hidden_size)
self.encoder_layers = []
for i in range(num_layers):
encoder_layer = Plato2EncoderLayer(n_head, hidden_size, attn_dropout, act_dropout)
self.encoder_layers.append(encoder_layer)
self.add_sublayer('layers.' + str(i), encoder_layer)
self.post_encoder_layer_norm = nn.LayerNorm(hidden_size)
self.dropout_layer = nn.Dropout(act_dropout)
def forward(self, caches, token_ids, type_ids, pos_ids, generation_mask, aux_emb=None):
out, self_attn_mask = self.gen_input(token_ids, type_ids, pos_ids, generation_mask, aux_emb)
new_caches = []
for i, encoder_layer in enumerate(self.encoder_layers):
out, new_cache = encoder_layer(out, self_attn_mask, caches[i])
new_caches.append(new_cache)
enc_output = self.post_encoder_layer_norm(out)
return enc_output, new_caches
def gen_input(self, token_ids, type_ids, pos_ids, input_mask, aux_emb=None):
token_emb_out = self.word_embedding_layer(token_ids)
type_emb_out = self.sent_embedding_layer(type_ids)
pos_emb_out = self.pos_embedding_layer(pos_ids)
emb_out = token_emb_out + type_emb_out + pos_emb_out
# auxiliary memory embeddings
if aux_emb is not None:
emb_out = paddle.concat([aux_emb, emb_out], axis=1)
emb_out = self.dropout_layer(emb_out)
# generate n-head self-attention mask
self_attn_mask = input_mask
self_attn_mask = paddle.scale(x=self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(x=[self_attn_mask] * self.n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
return emb_out, n_head_self_attn_mask
def gen_caches(self, key):
caches = [encoder_layer.gen_cache(key) for encoder_layer in self.encoder_layers]
return caches
class NSP(nn.Layer):
def __init__(self, vocab_size, type_size, max_position_seq_len, num_layers, n_head, hidden_size, attn_dropout,
act_dropout):
super(NSP, self).__init__()
self.n_head = n_head
self.hidden_size = hidden_size
self.word_embedding_layer = nn.Embedding(vocab_size, hidden_size)
self.sent_embedding_layer = nn.Embedding(type_size, hidden_size)
self.pos_embedding_layer = nn.Embedding(max_position_seq_len, hidden_size)
encoder_layer = nn.TransformerEncoderLayer(hidden_size, n_head, hidden_size * 4, act_dropout, 'gelu',
attn_dropout, act_dropout, 'True')
encoder_norm = nn.LayerNorm(hidden_size)
self.encoder = nn.TransformerEncoder(encoder_layer, num_layers, encoder_norm)
self.fc1 = nn.Linear(hidden_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, 2)
self.dropout_layer = nn.Dropout(act_dropout)
self.tanh_layer = nn.Tanh()
self.softmax = nn.Softmax()
def forward(self, inputs):
token_ids = inputs['token_ids']
type_ids = inputs['type_ids']
pos_ids = inputs['pos_ids']
attention_mask = inputs['attention_mask']
label_pos = inputs["label_pos"]
out, self_attn_mask = self.gen_input(token_ids, type_ids, pos_ids, attention_mask)
# [-1, seq_len, hidden_size]
enc_out = self.encoder(out, self_attn_mask)
enc_out = paddle.reshape(enc_out, [-1, self.hidden_size])
label_pos = paddle.cast(label_pos, 'int64')
out = paddle.gather(enc_out, label_pos)
pooled_out = self.fc1(out)
pooled_out = self.tanh_layer(pooled_out)
# [-1, 2]
logits = self.fc2(pooled_out)
probs = self.softmax(logits)
return probs
def gen_input(self, token_ids, type_ids, pos_ids, input_mask, aux_emb=None):
token_emb_out = self.word_embedding_layer(token_ids)
type_emb_out = self.sent_embedding_layer(type_ids)
pos_emb_out = self.pos_embedding_layer(pos_ids)
emb_out = token_emb_out + type_emb_out + pos_emb_out
# auxiliary memory embeddings
if aux_emb is not None:
emb_out = paddle.concat([aux_emb, emb_out], axis=1)
emb_out = self.dropout_layer(emb_out)
# generate n-head self-attention mask
self_attn_mask = input_mask
self_attn_mask = paddle.scale(x=self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(x=[self_attn_mask] * self.n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
return emb_out, n_head_self_attn_mask
class Plato2InferModel(nn.Layer):
def __init__(self,
nsp_reader,
num_layers,
n_head,
hidden_size,
vocab_size=8001,
type_size=2,
latent_type_size=20,
max_position_seq_len=256,
act_dropout=0.1,
attn_dropout=0.1,
max_dec_len=64,
min_dec_len=1,
topk=10):
super(Plato2InferModel, self).__init__()
self.nsp_reader = nsp_reader
self.num_layers = num_layers
self.latent_type_size = latent_type_size
self.max_dec_len = max_dec_len
self.min_dec_len = min_dec_len
self.topk = topk
self.unk_id = 0
self.bos_id = 1
self.eos_id = 2
self.mask_id = 8000
self.after_eos = paddle.ones([vocab_size]) * -1e9
self.after_eos[self.eos_id] = 0
self.is_cn = False
self.batch_size = 1
self.latent_weight = paddle.create_parameter([hidden_size, latent_type_size], 'float32')
self.plato2_encoder = Plato2Encoder(vocab_size, type_size, max_position_seq_len, num_layers, n_head,
hidden_size, attn_dropout, act_dropout)
self.logits_fc_layer = nn.Linear(hidden_size, hidden_size)
self.logits_layer_norm = nn.LayerNorm(hidden_size)
self.logits_bias = paddle.create_parameter([vocab_size], 'float32', is_bias=True)
self.nsp_predictor = NSP(vocab_size, type_size, max_position_seq_len, num_layers, n_head, hidden_size,
attn_dropout, act_dropout)
self.gelu_layer = nn.GELU()
self.softmax = nn.Softmax()
@paddle.no_grad()
def forward(self, inputs):
token_ids = inputs['token_ids']
type_ids = inputs['type_ids']
pos_ids = inputs['pos_ids']
generation_mask = inputs['generation_mask']
latent_id = inputs['latent_id']
data_id = inputs['data_id']
# [-1, 1, latent_type_size]
latent_id = F.one_hot(latent_id, self.latent_type_size)
# [-1, 1, hidden_size]
latent_emb = paddle.matmul(latent_id, self.latent_weight, transpose_y=True)
caches = self.plato2_encoder.gen_caches(token_ids)
# [-1, seq_len + 1, hidden_size]
enc_out, new_caches = self.plato2_encoder(caches, token_ids, type_ids, pos_ids, generation_mask, latent_emb)
pred_ids = self.decode(inputs, new_caches)
nsp_inputs = self.gen_nsp_input(token_ids, pred_ids)
# [-1, 2]
probs = self.nsp_predictor(nsp_inputs)
return self.get_results(data_id, token_ids, pred_ids, probs)
def decode(self, inputs, caches):
tgt_ids = inputs['tgt_ids']
tgt_pos = inputs['tgt_pos']
tgt_generation_mask = inputs['tgt_generation_mask']
predictions = tgt_ids
# TODO
step = 0
while step < self.max_dec_len:
# [-1, 1]
append_mask = paddle.cast(tgt_ids != self.eos_id, dtype=tgt_generation_mask.dtype)
tgt_generation_mask = paddle.concat([tgt_generation_mask, paddle.unsqueeze(append_mask, 1)], axis=-1)
tgt_sent = paddle.ones([tgt_generation_mask.shape[0], 1], dtype=tgt_ids.dtype)
# [-1, 1, hidden_size]
out, caches = self.plato2_encoder(caches, tgt_ids, tgt_sent, tgt_pos, tgt_generation_mask)
out = paddle.squeeze(out, axis=1)
# [-1, hidden_size]
trans = self.logits_fc_layer(out)
trans = self.gelu_layer(trans)
trans = self.logits_layer_norm(trans)
# [-1, vocab_size]
logits = paddle.matmul(trans, self.plato2_encoder.word_embedding_layer.weight,
transpose_y=True) + self.logits_bias
logits[:, self.unk_id] = -1e9
logits[:, self.bos_id] = -1e9
logits[:, self.mask_id] = -1e9
if step < self.min_dec_len:
logits[:, self.eos_id] = -1e9
logits = logits * append_mask + (1 - append_mask) * self.after_eos
probs = self.softmax(logits)
# [-1, topk]
topk_probs, _ = paddle.topk(probs, k=self.topk)
mask = paddle.cast(probs >= topk_probs[:, -1:], 'float32')
sums = paddle.sum(topk_probs, axis=-1, keepdim=True)
new_probs = probs * mask / sums
# [-1, 1]
sampling_ids = paddle.multinomial(new_probs)
step = step + 1
tgt_ids = sampling_ids
tgt_pos = tgt_pos + 1
predictions = paddle.concat([predictions, tgt_ids], axis=1)
return predictions
def gen_nsp_input(self, token_ids, pred_ids):
token_ids = token_ids.numpy()
pred_ids = pred_ids.numpy()
def __reader__():
headers = ["src", "tgt", "data_id"]
Example = namedtuple("Example", headers)
for i, (raw, pred) in enumerate(zip(token_ids, pred_ids)):
context = post_process_context(raw, self.nsp_reader, merge=False)
_, response = post_process_response(pred, self.nsp_reader, merge=False)
context_tokenized_input = " [SEP] ".join(" ".join(utt) for utt in context)
response_tokenized_input = " ".join(response)
example = Example(src=context_tokenized_input, tgt=response_tokenized_input, data_id=i)
data = self.nsp_reader._convert_example_to_record(example, is_infer=True)
yield data
return
generator = self.nsp_reader.data_generator(
reader=__reader__,
is_infer=True,
phase="test",
)
inputs = next(generator())
#print('\nnsp_inputs:')
for key in inputs:
inputs[key] = paddle.to_tensor(inputs[key])
if key in ['token_ids', 'type_ids', 'pos_ids']:
inputs[key] = paddle.squeeze(inputs[key], axis=-1)
#print(key, inputs[key].shape)
#print(inputs[key])
return inputs
def get_results(self, data_id, token_ids, pred_ids, probs):
data_id = data_id.numpy()
token_ids = token_ids.numpy()
pred_ids = pred_ids.numpy()
probs = probs.numpy()
infos = []
for raw, pred, prob in zip(token_ids, pred_ids, probs):
tokens = post_process_context(raw, self.nsp_reader)
pred_token_ids, pred_tokens = post_process_response(pred, self.nsp_reader)
info = {}
info['response'] = ' '.join(pred_tokens)
cross_turn_repetition = get_cross_turn_repetition(tokens, pred_tokens, self.nsp_reader.eos_id, self.is_cn)
in_turn_repetition = max(get_in_turn_repetition(pred_tokens, self.is_cn),
get_in_turn_repetition(pred_token_ids))
info['score'] = float(prob[1])
if len(pred_token_ids) >= self.max_dec_len:
info['score'] -= 1e3
elif cross_turn_repetition > 0:
info['score'] -= 1e3
elif in_turn_repetition > 0:
info['score'] -= 1e3
infos.append(info)
results = []
pre_idx = 0
sample = []
for idx, info in zip(data_id, infos):
if idx != pre_idx:
sample = sorted(sample, key=lambda info: -info["score"])
result = sample[0]
result['data_id'] = pre_idx
results.append(result)
sample = []
pre_idx = idx
sample.append(info)
if sample:
sample = sorted(sample, key=lambda info: -info["score"])
result = sample[0]
result['data_id'] = pre_idx
results.append(result)
return results
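# A standalone numpy sketch of the top-k sampling step in decode() above
# (illustrative only; the model performs the same masking with paddle ops):
# keep the k largest probabilities, renormalize them, and sample an index.
def _top_k_sample_sketch(probs, k, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    threshold = np.sort(probs)[-k]  # k-th largest probability
    masked = np.where(probs >= threshold, probs, 0.0)
    return rng.choice(len(probs), p=masked / masked.sum())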
|
5849ea9ead3853b30f46093b94f1406dbc207295
|
cfb41f392fac304095a80d08497727c621550c00
|
/examples/euler_3d/Sedov.py
|
af665c3d2b21b9cb6c66f5b682609c2065b449bb
|
[
"BSD-3-Clause"
] |
permissive
|
clawpack/pyclaw
|
5b7121b63609c2cf9af30e012c9318e3b5244f18
|
6323b7295b80f33285b958b1a2144f88f51be4b1
|
refs/heads/master
| 2023-04-16T23:48:31.519427
| 2023-03-21T06:08:21
| 2023-03-21T06:08:21
| 1,628,711
| 124
| 97
|
BSD-3-Clause
| 2023-09-12T12:22:30
| 2011-04-18T03:11:21
|
Fortran
|
UTF-8
|
Python
| false
| false
| 3,996
|
py
|
Sedov.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Test problem demonstrating a Sedov blast wave problem.
A spherical step function energy perturbation is initialized at the center of
the domain. This creates an expanding shock wave.
This problem evolves the 3D Euler equations.
The primary variables are:
density (rho), x,y, and z momentum (rho*u,rho*v,rho*w), and energy.
"""
from __future__ import absolute_import
import numpy as np
from scipy import integrate
from clawpack import riemann
from clawpack.riemann.euler_3D_constants import density, x_momentum, \
y_momentum, z_momentum, energy, num_eqn
from six.moves import range
gamma = 1.4 # Ratio of Specific Heats
x0 = 0.0; y0 = 0.0; z0 = 0.0 # Sphere location
rmax = 0.10 # Radius of Sedov Sphere
def sphere_top(y, x):
z2 = rmax**2 - (x-x0)**2 - (y-y0)**2
if z2 < 0:
return 0
else:
return np.sqrt(z2)
def sphere_bottom(y, x):
return -sphere_top(y,x)
def f(y, x, zdown, zup):
top = min(sphere_top(y,x), zup)
bottom = min(top,max(sphere_bottom(y,x), zdown))
return top-bottom
def setup(kernel_language='Fortran', solver_type='classic', use_petsc=False,
dimensional_split=False, outdir='Sedov_output', output_format='hdf5',
disable_output=False, num_cells=(64,64,64),
tfinal=0.10, num_output_times=10):
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver = pyclaw.ClawSolver3D(riemann.euler_3D)
solver.dimensional_split = dimensional_split
solver.limiters = pyclaw.limiters.tvd.minmod
solver.cfl_max = 0.6
solver.cfl_desired = 0.55
solver.dt_initial = 3e-4
else:
raise Exception('Unrecognized solver_type.')
x = pyclaw.Dimension(-1.0, 1.0, num_cells[0], name='x')
y = pyclaw.Dimension(-1.0, 1.0, num_cells[1], name='y')
z = pyclaw.Dimension(-1.0, 1.0, num_cells[2], name='z')
domain = pyclaw.Domain([x,y,z])
state = pyclaw.State(domain,num_eqn)
state.problem_data['gamma']=gamma
grid = state.grid
X,Y,Z = grid.p_centers
r = np.sqrt((X-x0)**2 + (Y-y0)**2 + (Z-z0)**2)
state.q[density, :,:,:] = 1.0
state.q[x_momentum,:,:,:] = 0.
state.q[y_momentum,:,:,:] = 0.
state.q[z_momentum,:,:,:] = 0.
background_pressure = 1.0e-2
Eblast = 0.851072
pressure_in = Eblast*(gamma-1.)/(4./3.*np.pi*rmax**3)
state.q[energy,:,:,:] = background_pressure/(gamma-1.) # energy (e)
# Compute cell fraction inside initial perturbed sphere
dx, dy, dz = state.grid.delta
dx2, dy2, dz2 = [d/2. for d in state.grid.delta]
dmax = max(state.grid.delta)
for i in range(state.q.shape[1]):
for j in range(state.q.shape[2]):
for k in range(state.q.shape[3]):
if r[i,j,k] - dmax > rmax:
continue
xdown = X[i,j,k] - dx2
xup = X[i,j,k] + dx2
ydown = lambda x : Y[i,j,k] - dy2
yup = lambda x : Y[i,j,k] + dy2
zdown = Z[i,j,k] - dz2
zup = Z[i,j,k] + dz2
infrac,abserr = integrate.dblquad(f,xdown,xup,ydown,yup,args=(zdown,zup),epsabs=1.e-3,epsrel=1.e-2)
infrac=infrac/(dx*dy*dz)
p = background_pressure + pressure_in*infrac # pressure
state.q[energy,i,j,k] = p/(gamma-1.) # energy (e)
solver.all_bcs = pyclaw.BC.extrap
claw = pyclaw.Controller()
claw.solution = pyclaw.Solution(state, domain)
claw.solver = solver
claw.output_format = output_format
claw.keep_copy = True
if disable_output:
claw.output_format = None
claw.tfinal = tfinal
claw.num_output_times = num_output_times
claw.outdir = outdir
return claw
# __main__()
if __name__=="__main__":
from clawpack.pyclaw.util import run_app_from_main
output = run_app_from_main(setup)
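# A quick standalone check of the cell-fraction integrand f above (a sketch,
# not part of the solver setup): integrating f over a box that contains the
# whole sphere should recover the sphere volume 4/3 * pi * rmax**3.
#
#     vol, _ = integrate.dblquad(f, -rmax, rmax,
#                                lambda x: -rmax, lambda x: rmax,
#                                args=(-rmax, rmax), epsabs=1e-4)
#     assert abs(vol - 4. / 3. * np.pi * rmax ** 3) < 1e-3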
|
c3b63baa9d6bbd88c20f949750a3206c4b1a8394
|
769f6d88fd777459eb60eb1bbb0fba17cb20d963
|
/Chapter03/create_messages.py
|
b6ea5807c7af8888a4f54808140e00dbfebf1e97
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Web-Scraping-Cookbook
|
141379d09abe2c7d8f408858a2eb44ff0fe3ef26
|
030eb974ba1437b2590b59d38f19fb697bbf9d4c
|
refs/heads/master
| 2023-02-16T04:29:49.942243
| 2023-01-30T04:19:03
| 2023-01-30T04:19:03
| 120,744,571
| 115
| 105
|
MIT
| 2019-10-03T17:38:37
| 2018-02-08T10:08:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 765
|
py
|
create_messages.py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import boto3
# create sqs client
sqs = boto3.client('sqs', "us-west-2")
# create / open the SQS queue
queue = sqs.create_queue(QueueName="PlanetMoreInfo")
print (queue)
# read and parse the planets HTML
html = urlopen("http://127.0.0.1:8080/pages/planets.min.html")
bsobj = BeautifulSoup(html, "lxml")
planets = []
planet_rows = bsobj.html.body.div.table.findAll("tr", {"class": "planet"})
for i in planet_rows:
tds = i.findAll("td")
# get the URL
more_info_url = tds[5].findAll("a")[0]["href"].strip()
# send the URL to the queue
sqs.send_message(QueueUrl=queue["QueueUrl"],
MessageBody=more_info_url)
print("Sent %s to %s" % (more_info_url, queue["QueueUrl"]))
|
0ea32994be9721c20d6703d1b45e60ade33161f9
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/iterate-through-dictionary-python/sort-values-only.py
|
8154866db3d68d12ff056c800060b6ff193c5054
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 124
|
py
|
sort-values-only.py
|
incomes = {"apple": 5600.00, "orange": 3500.00, "banana": 5000.00}
for value in sorted(incomes.values()):
print(value)
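# A related sketch: to keep the keys while sorting by value, sort the items
# with a key function instead of iterating over values() alone.
for fruit, value in sorted(incomes.items(), key=lambda item: item[1]):
    print(fruit, "->", value)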
|
7d0074dc89ef118903256023d109050d9d0d8ca0
|
a79c7c01b97e391bcd833a8b7b1dfd7cf982d59c
|
/tests/dummy_env.py
|
0c9e5e1a03fdffa335655bc700f8782bcdd16f7e
|
[
"Apache-2.0"
] |
permissive
|
learnables/cherry
|
32f3a545563e0446ad3b0243a5cc8225033c0a7f
|
f4164a53dcc762ac5ce53a761fb54f3f69847f90
|
refs/heads/master
| 2023-06-27T14:20:06.027516
| 2023-06-26T01:34:54
| 2023-06-26T01:34:54
| 159,752,575
| 185
| 33
|
Apache-2.0
| 2023-06-26T01:34:56
| 2018-11-30T01:46:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
dummy_env.py
|
#!/usr/bin/env python3
import random
import gym
import numpy as np
class Dummy(gym.Env):
"""
A dummy environment that returns random states and rewards.
"""
def __init__(self):
low = np.array([-5, -5, -5, -5, -5])
high = np.array([5, 5, 5, 5, 5])
self.observation_space = gym.spaces.Box(low, high, dtype=np.float32)
self.action_space = gym.spaces.Box(low, high, dtype=np.float32)
self.rng = random.Random()
def step(self, action):
assert self.action_space.contains(action)
next_state = self.observation_space.sample()
reward = action.sum()
done = self.rng.random() > 0.95  # episode ends with 5% probability per step
info = {}
return next_state, reward, done, info
def reset(self):
return self.observation_space.sample()
def seed(self, seed=1234):
self.rng.seed(seed)
np.random.seed(seed)
def _render(self, mode='human', close=False):
pass
def _take_action(self, action):
pass
def _get_reward(self):
return self.rng.randint(0, 10)
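# A minimal interaction sketch (illustrative, not part of the test helpers):
if __name__ == "__main__":
    env = Dummy()
    env.seed(1234)
    state = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        print(reward, done)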
|
0f6acfbf7e7fb9aae4577f03455b0f1ae1bb8b20
|
9abc1fe64663e658c1926f0e238004ce890437bf
|
/tester/test_handlers/test_post_info_relation_handler.py
|
2f9d0fe832cd81a25bf18b950c950c9daeeb9623
|
[
"MIT"
] |
permissive
|
bukun/TorCMS
|
e7a8a3a0e4e728e64d2a34c56d694e48e0e3a098
|
f9afae46a5029d213d5fb60850c93b37b813ae15
|
refs/heads/master
| 2023-08-31T05:37:35.861174
| 2023-08-29T02:41:12
| 2023-08-29T02:41:12
| 30,642,412
| 256
| 105
|
MIT
| 2023-07-20T02:24:20
| 2015-02-11T10:22:06
|
CSS
|
UTF-8
|
Python
| false
| false
| 392
|
py
|
test_post_info_relation_handler.py
|
# -*- coding:utf-8 -*-
'''
Test
'''
from tornado.testing import AsyncHTTPSTestCase
from application import APP
class TestPostHandler(AsyncHTTPSTestCase):
def get_app(self):
'''
Test
'''
return APP
def test_Rel(self):
'''
Test post.
'''
response = self.fetch('/rel/1/')
self.assertEqual(response.code, 200)
|
4e7c13a7298a6b997fc3ceddf93f66c506555a8d
|
b7314f9480634b2f2998c8181d4284d2b52ebba1
|
/src/python/txtai/cloud/storage.py
|
ac13b1ecd644acfc5b157027c66c2b7d54f71b24
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
neuml/txtai
|
3ca6fba11126d650ea4f2cf5199011a52ea56e4e
|
789a4555cb60ee9cdfa69afae5a5236d197e2b07
|
refs/heads/master
| 2023-08-31T08:09:31.834178
| 2023-08-29T15:36:23
| 2023-08-29T15:36:23
| 286,301,447
| 4,804
| 387
|
Apache-2.0
| 2023-09-11T17:12:40
| 2020-08-09T19:14:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
storage.py
|
"""
Object storage module
"""
import os
# Conditional import
try:
from libcloud.storage.providers import get_driver, DRIVERS
from libcloud.storage.types import ContainerDoesNotExistError, ObjectDoesNotExistError
LIBCLOUD = True
except ImportError:
LIBCLOUD, DRIVERS = False, None
from .base import Cloud
class ObjectStorage(Cloud):
"""
Object storage cloud provider backed by Apache libcloud.
"""
@staticmethod
def isprovider(provider):
"""
Checks if this provider is an object storage provider.
Args:
provider: provider name
Returns:
True if this is an object storage provider
"""
return LIBCLOUD and provider and provider in DRIVERS
def __init__(self, config):
super().__init__(config)
if not LIBCLOUD:
raise ImportError('Cloud object storage is not available - install "cloud" extra to enable')
# Get driver for provider
driver = get_driver(config["provider"])
# Get client connection
self.client = driver(
config.get("key", os.environ.get("ACCESS_KEY")),
config.get("secret", os.environ.get("ACCESS_SECRET")),
host=config.get("host"),
port=config.get("port"),
token=config.get("token"),
region=config.get("region"),
)
def metadata(self, path=None):
try:
# If this is an archive path, check if file exists
if self.isarchive(path):
return self.client.get_object(self.config["container"], os.path.basename(path))
# Otherwise check if container exists
return self.client.get_container(self.config["container"])
except (ContainerDoesNotExistError, ObjectDoesNotExistError):
return None
def load(self, path=None):
# Download archive file
if self.isarchive(path):
obj = self.client.get_object(self.config["container"], os.path.basename(path))
obj.download(path, overwrite_existing=True)
# Download all files in container
else:
# Create local directory, if necessary
os.makedirs(path, exist_ok=True)
container = self.client.get_container(self.config["container"])
for obj in container.list_objects():
obj.download(os.path.join(path, obj.name), overwrite_existing=True)
return path
def save(self, path):
# Get or create container
try:
container = self.client.get_container(self.config["container"])
except ContainerDoesNotExistError:
container = self.client.create_container(self.config["container"])
# Upload files
for f in self.listfiles(path):
with open(f, "rb") as iterator:
self.client.upload_object_via_stream(iterator=iterator, container=container, object_name=os.path.basename(f))
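# A sketch of the configuration this class expects (hypothetical values; only
# "provider" and "container" are read unconditionally by the code above, and
# credentials fall back to the ACCESS_KEY / ACCESS_SECRET environment variables):
#
#     config = {
#         "provider": "s3",
#         "container": "txtai-backups",
#         "key": "AKIA...",
#         "secret": "...",
#         "region": "us-east-1",
#     }
#     storage = ObjectStorage(config)
#     storage.save("index.tar.gz")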
|
606e1a95d33afeeefeb28a85d6f8b27d87392b78
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/scanners/migrations/0051_auto_20221108_1701.py
|
fdce2685a89b7759860037f432b070f5e5199885
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
0051_auto_20221108_1701.py
|
# Generated by Django 3.2.16 on 2022-11-08 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scanners', '0050_auto_20221107_1721'),
]
operations = [
migrations.AddField(
model_name='scannerqueryrule',
name='description',
field=models.CharField(blank=True, default='', help_text='Human readable description for the scanner rule', max_length=255),
),
migrations.AddField(
model_name='scannerrule',
name='description',
field=models.CharField(blank=True, default='', help_text='Human readable description for the scanner rule', max_length=255),
),
]
|
6e9253116793f8cec4867e97fb38024252f5189e
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/sharepoint/publishing/video/channel.py
|
d5044a38ee42712566bbc309b5c9b7e7d08c689d
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
channel.py
|
from office365.runtime.client_result import ClientResult
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.sharepoint.base_entity import BaseEntity
class VideoChannel(BaseEntity):
def get_video_count(self):
return_type = ClientResult(self.context)
qry = ServiceOperationQuery(self, "GetVideoCount", None, None, None, return_type)
self.context.add_query(qry)
return return_type
@property
def entity_type_name(self):
return "SP.Publishing.VideoChannel"
|
c1a10892556d5a9959a2f6aaafcade20e33d6c7b
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-linkedin-pages/source_linkedin_pages/source.py
|
49ea9e626d14fc14fe3b5cf1e47947e674466dba
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 6,349
|
py
|
source.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import Oauth2Authenticator, TokenAuthenticator
class LinkedinPagesStream(HttpStream, ABC):
url_base = "https://api.linkedin.com/v2/"
primary_key = None
def __init__(self, config):
super().__init__(authenticator=config.get("authenticator"))
self.config = config
@property
def org(self):
"""Property to return the user Organization Id from input"""
return self.config.get("org_id")
def path(self, **kwargs) -> str:
"""Returns the API endpoint path for stream, from `endpoint` class attribute."""
return self.endpoint
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(
self, response: requests.Response, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None
) -> Iterable[Mapping]:
return [response.json()]
def should_retry(self, response: requests.Response) -> bool:
if response.status_code == 429:
error_message = (
f"Stream {self.name}: LinkedIn API requests are rate limited. "
f"Rate limits specify the maximum number of API calls that can be made in a 24 hour period. "
f"These limits reset at midnight UTC every day. "
f"You can find more information here https://docs.airbyte.com/integrations/sources/linkedin-pages. "
f"Also quotas and usage are here: https://www.linkedin.com/developers/apps."
)
self.logger.error(error_message)
return super().should_retry(response)
class OrganizationLookup(LinkedinPagesStream):
def path(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
path = f"organizations/{self.org}"
return path
class FollowerStatistics(LinkedinPagesStream):
def path(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
path = f"organizationalEntityFollowerStatistics?q=organizationalEntity&organizationalEntity=urn:li:organization:{self.org}"
return path
def parse_response(
self, response: requests.Response, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None
) -> Iterable[Mapping]:
yield from response.json().get("elements")
class ShareStatistics(LinkedinPagesStream):
def path(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
path = f"organizationalEntityShareStatistics?q=organizationalEntity&organizationalEntity=urn%3Ali%3Aorganization%3A{self.org}"
return path
def parse_response(
self, response: requests.Response, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None
) -> Iterable[Mapping]:
yield from response.json().get("elements")
class TotalFollowerCount(LinkedinPagesStream):
def path(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
path = f"networkSizes/urn:li:organization:{self.org}?edgeType=CompanyFollowedByMember"
return path
class SourceLinkedinPages(AbstractSource):
"""
Abstract Source inheritance, provides:
- an implementation of `check` for the connector's connectivity
- an implementation that calls each stream with its input parameters.
"""
@classmethod
def get_authenticator(cls, config: Mapping[str, Any]) -> TokenAuthenticator:
"""
Validate input parameters and generate a necessary Authentication object
This connectors support 2 auth methods:
1) direct access token with TTL = 2 months
2) refresh token (TTL = 1 year) which can be converted to access tokens
Every new refresh revokes all previous access tokens.
"""
auth_method = config.get("credentials", {}).get("auth_method")
if not auth_method or auth_method == "access_token":
# backward compatibility with configs that already exist
access_token = config["credentials"]["access_token"] if auth_method else config["access_token"]
return TokenAuthenticator(token=access_token)
elif auth_method == "oAuth2.0":
return Oauth2Authenticator(
token_refresh_endpoint="https://www.linkedin.com/oauth/v2/accessToken",
client_id=config["credentials"]["client_id"],
client_secret=config["credentials"]["client_secret"],
refresh_token=config["credentials"]["refresh_token"],
)
raise Exception("incorrect input parameters")
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, any]:
# RUN $ python main.py check --config secrets/config.json
"""
Testing connection availability for the connector.
:: for this check method the Customer must have the "r_liteprofile" scope enabled.
:: more info: https://docs.microsoft.com/linkedin/consumer/integrations/self-serve/sign-in-with-linkedin
"""
config["authenticator"] = self.get_authenticator(config)
stream = OrganizationLookup(config)
stream.records_limit = 1
try:
next(stream.read_records(sync_mode=SyncMode.full_refresh), None)
return True, None
except Exception as e:
return False, e
# RUN: $ python main.py read --config secrets/config.json --catalog integration_tests/configured_catalog.json
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
config["authenticator"] = self.get_authenticator(config)
        return [
            OrganizationLookup(config),
            FollowerStatistics(config),
            ShareStatistics(config),
            TotalFollowerCount(config),
        ]
|
0a6fd93c95c3ee3d805ac3c6d91eaa59f09dcb1e
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/ppdet/modeling/backbones/resnet.py
|
3b9508c49f932ffa34f53a946224ed8d7a3ae564
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 19,872
|
py
|
resnet.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from numbers import Integral
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Uniform
from paddle import ParamAttr
from paddle.nn.initializer import Constant
from paddle.vision.ops import DeformConv2D
from .name_adapter import NameAdapter
from ..shape_spec import ShapeSpec
__all__ = ['ResNet', 'Res5Head', 'Blocks', 'BasicBlock', 'BottleNeck']
ResNet_cfg = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}
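# ResNet_cfg maps depth -> number of residual blocks in stages res2..res5,
# e.g. ResNet-50 stacks 3, 4, 6 and 3 bottleneck blocks.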
class ConvNormLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size,
stride,
groups=1,
act=None,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
lr=1.0,
dcn_v2=False):
super(ConvNormLayer, self).__init__()
assert norm_type in ['bn', 'sync_bn']
self.norm_type = norm_type
self.act = act
self.dcn_v2 = dcn_v2
if not self.dcn_v2:
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(learning_rate=lr),
bias_attr=False)
else:
self.offset_channel = 2 * filter_size**2
self.mask_channel = filter_size**2
self.conv_offset = nn.Conv2D(
in_channels=ch_in,
out_channels=3 * filter_size**2,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
weight_attr=ParamAttr(initializer=Constant(0.)),
bias_attr=ParamAttr(initializer=Constant(0.)))
self.conv = DeformConv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
dilation=1,
groups=groups,
weight_attr=ParamAttr(learning_rate=lr),
bias_attr=False)
norm_lr = 0. if freeze_norm else lr
param_attr = ParamAttr(
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay),
trainable=False if freeze_norm else True)
bias_attr = ParamAttr(
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay),
trainable=False if freeze_norm else True)
global_stats = True if freeze_norm else None
if norm_type in ['sync_bn', 'bn']:
self.norm = nn.BatchNorm2D(
ch_out,
weight_attr=param_attr,
bias_attr=bias_attr,
use_global_stats=global_stats)
norm_params = self.norm.parameters()
if freeze_norm:
for param in norm_params:
param.stop_gradient = True
def forward(self, inputs):
if not self.dcn_v2:
out = self.conv(inputs)
else:
offset_mask = self.conv_offset(inputs)
offset, mask = paddle.split(
offset_mask,
num_or_sections=[self.offset_channel, self.mask_channel],
axis=1)
mask = F.sigmoid(mask)
out = self.conv(inputs, offset, mask=mask)
if self.norm_type in ['bn', 'sync_bn']:
out = self.norm(out)
if self.act:
out = getattr(F, self.act)(out)
return out
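    # Shape sketch (illustrative values, not from the original source):
    #   layer = ConvNormLayer(ch_in=64, ch_out=128, filter_size=3, stride=2, act='relu')
    #   y = layer(paddle.randn([1, 64, 56, 56]))  # -> [1, 128, 28, 28]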
class SELayer(nn.Layer):
def __init__(self, ch, reduction_ratio=16):
super(SELayer, self).__init__()
self.pool = nn.AdaptiveAvgPool2D(1)
stdv = 1.0 / math.sqrt(ch)
c_ = ch // reduction_ratio
self.squeeze = nn.Linear(
ch,
c_,
weight_attr=paddle.ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=True)
stdv = 1.0 / math.sqrt(c_)
self.extract = nn.Linear(
c_,
ch,
weight_attr=paddle.ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=True)
def forward(self, inputs):
out = self.pool(inputs)
out = paddle.squeeze(out, axis=[2, 3])
out = self.squeeze(out)
out = F.relu(out)
out = self.extract(out)
out = F.sigmoid(out)
out = paddle.unsqueeze(out, axis=[2, 3])
scale = out * inputs
return scale
class BasicBlock(nn.Layer):
expansion = 1
def __init__(self,
ch_in,
ch_out,
stride,
shortcut,
variant='b',
groups=1,
base_width=64,
lr=1.0,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
dcn_v2=False,
std_senet=False):
super(BasicBlock, self).__init__()
assert groups == 1 and base_width == 64, 'BasicBlock only supports groups=1 and base_width=64'
self.shortcut = shortcut
if not shortcut:
if variant == 'd' and stride == 2:
self.short = nn.Sequential()
self.short.add_sublayer(
'pool',
nn.AvgPool2D(
kernel_size=2, stride=2, padding=0, ceil_mode=True))
self.short.add_sublayer(
'conv',
ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=1,
stride=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr))
else:
self.short = ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=1,
stride=stride,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.branch2a = ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=3,
stride=stride,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.branch2b = ConvNormLayer(
ch_in=ch_out,
ch_out=ch_out,
filter_size=3,
stride=1,
act=None,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr,
dcn_v2=dcn_v2)
self.std_senet = std_senet
if self.std_senet:
self.se = SELayer(ch_out)
def forward(self, inputs):
out = self.branch2a(inputs)
out = self.branch2b(out)
if self.std_senet:
out = self.se(out)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
out = paddle.add(x=out, y=short)
out = F.relu(out)
return out
class BottleNeck(nn.Layer):
expansion = 4
def __init__(self,
ch_in,
ch_out,
stride,
shortcut,
variant='b',
groups=1,
base_width=4,
lr=1.0,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
dcn_v2=False,
std_senet=False):
super(BottleNeck, self).__init__()
if variant == 'a':
stride1, stride2 = stride, 1
else:
stride1, stride2 = 1, stride
# ResNeXt
width = int(ch_out * (base_width / 64.)) * groups
self.branch2a = ConvNormLayer(
ch_in=ch_in,
ch_out=width,
filter_size=1,
stride=stride1,
groups=1,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.branch2b = ConvNormLayer(
ch_in=width,
ch_out=width,
filter_size=3,
stride=stride2,
groups=groups,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr,
dcn_v2=dcn_v2)
self.branch2c = ConvNormLayer(
ch_in=width,
ch_out=ch_out * self.expansion,
filter_size=1,
stride=1,
groups=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.shortcut = shortcut
if not shortcut:
if variant == 'd' and stride == 2:
self.short = nn.Sequential()
self.short.add_sublayer(
'pool',
nn.AvgPool2D(
kernel_size=2, stride=2, padding=0, ceil_mode=True))
self.short.add_sublayer(
'conv',
ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out * self.expansion,
filter_size=1,
stride=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr))
else:
self.short = ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out * self.expansion,
filter_size=1,
stride=stride,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.std_senet = std_senet
if self.std_senet:
self.se = SELayer(ch_out * self.expansion)
def forward(self, inputs):
out = self.branch2a(inputs)
out = self.branch2b(out)
out = self.branch2c(out)
if self.std_senet:
out = self.se(out)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
out = paddle.add(x=out, y=short)
out = F.relu(out)
return out
class Blocks(nn.Layer):
def __init__(self,
block,
ch_in,
ch_out,
count,
name_adapter,
stage_num,
variant='b',
groups=1,
base_width=64,
lr=1.0,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
dcn_v2=False,
std_senet=False):
super(Blocks, self).__init__()
self.blocks = []
for i in range(count):
conv_name = name_adapter.fix_layer_warp_name(stage_num, count, i)
layer = self.add_sublayer(
conv_name,
block(
ch_in=ch_in,
ch_out=ch_out,
stride=2 if i == 0 and stage_num != 2 else 1,
shortcut=False if i == 0 else True,
variant=variant,
groups=groups,
base_width=base_width,
lr=lr,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
dcn_v2=dcn_v2,
std_senet=std_senet))
self.blocks.append(layer)
if i == 0:
ch_in = ch_out * block.expansion
def forward(self, inputs):
block_out = inputs
for block in self.blocks:
block_out = block(block_out)
return block_out
@register
@serializable
class ResNet(nn.Layer):
__shared__ = ['norm_type']
def __init__(self,
depth=50,
ch_in=64,
variant='b',
lr_mult_list=[1.0, 1.0, 1.0, 1.0],
groups=1,
base_width=64,
norm_type='bn',
norm_decay=0,
freeze_norm=True,
freeze_at=0,
return_idx=[0, 1, 2, 3],
dcn_v2_stages=[-1],
num_stages=4,
std_senet=False):
"""
Residual Network, see https://arxiv.org/abs/1512.03385
Args:
depth (int): ResNet depth, should be 18, 34, 50, 101, 152.
ch_in (int): output channel of first stage, default 64
variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
            lr_mult_list (list): learning rate ratio of different resnet stages (2, 3, 4, 5);
                a lower learning rate ratio is needed for pretrained models
                obtained via distillation (default [1.0, 1.0, 1.0, 1.0]).
groups (int): group convolution cardinality
base_width (int): base width of each group convolution
norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
norm_decay (float): weight decay for normalization layer weights
freeze_norm (bool): freeze normalization layers
freeze_at (int): freeze the backbone at which stage
return_idx (list): index of the stages whose feature maps are returned
dcn_v2_stages (list): index of stages who select deformable conv v2
num_stages (int): total num of stages
            std_senet (bool): whether to use SENet, default False
"""
super(ResNet, self).__init__()
self._model_type = 'ResNet' if groups == 1 else 'ResNeXt'
assert num_stages >= 1 and num_stages <= 4
self.depth = depth
self.variant = variant
self.groups = groups
self.base_width = base_width
self.norm_type = norm_type
self.norm_decay = norm_decay
self.freeze_norm = freeze_norm
self.freeze_at = freeze_at
if isinstance(return_idx, Integral):
return_idx = [return_idx]
        assert max(return_idx) < num_stages, \
            'the maximum return index must be smaller than num_stages, ' \
            'but received maximum return index is {} and num_stages ' \
            'is {}'.format(max(return_idx), num_stages)
self.return_idx = return_idx
self.num_stages = num_stages
assert len(lr_mult_list) == 4, \
"lr_mult_list length must be 4 but got {}".format(len(lr_mult_list))
if isinstance(dcn_v2_stages, Integral):
dcn_v2_stages = [dcn_v2_stages]
assert max(dcn_v2_stages) < num_stages
self.dcn_v2_stages = dcn_v2_stages
block_nums = ResNet_cfg[depth]
na = NameAdapter(self)
conv1_name = na.fix_c1_stage_name()
if variant in ['c', 'd']:
conv_def = [
[3, ch_in // 2, 3, 2, "conv1_1"],
[ch_in // 2, ch_in // 2, 3, 1, "conv1_2"],
[ch_in // 2, ch_in, 3, 1, "conv1_3"],
]
else:
conv_def = [[3, ch_in, 7, 2, conv1_name]]
self.conv1 = nn.Sequential()
for (c_in, c_out, k, s, _name) in conv_def:
self.conv1.add_sublayer(
_name,
ConvNormLayer(
ch_in=c_in,
ch_out=c_out,
filter_size=k,
stride=s,
groups=1,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=1.0))
self.ch_in = ch_in
ch_out_list = [64, 128, 256, 512]
block = BottleNeck if depth >= 50 else BasicBlock
self._out_channels = [block.expansion * v for v in ch_out_list]
self._out_strides = [4, 8, 16, 32]
self.res_layers = []
for i in range(num_stages):
lr_mult = lr_mult_list[i]
stage_num = i + 2
res_name = "res{}".format(stage_num)
res_layer = self.add_sublayer(
res_name,
Blocks(
block,
self.ch_in,
ch_out_list[i],
count=block_nums[i],
name_adapter=na,
stage_num=stage_num,
variant=variant,
groups=groups,
base_width=base_width,
lr=lr_mult,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
dcn_v2=(i in self.dcn_v2_stages),
std_senet=std_senet))
self.res_layers.append(res_layer)
self.ch_in = self._out_channels[i]
if freeze_at >= 0:
self._freeze_parameters(self.conv1)
for i in range(min(freeze_at + 1, num_stages)):
self._freeze_parameters(self.res_layers[i])
def _freeze_parameters(self, m):
for p in m.parameters():
p.stop_gradient = True
@property
def out_shape(self):
return [
ShapeSpec(
channels=self._out_channels[i], stride=self._out_strides[i])
for i in self.return_idx
]
def forward(self, inputs):
x = inputs['image']
conv1 = self.conv1(x)
x = F.max_pool2d(conv1, kernel_size=3, stride=2, padding=1)
outs = []
for idx, stage in enumerate(self.res_layers):
x = stage(x)
if idx in self.return_idx:
outs.append(x)
return outs
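    # Usage sketch (hypothetical direct call; in ppdet this backbone is normally
    # built from a YAML config via the workspace registry):
    #   backbone = ResNet(depth=50, return_idx=[1, 2, 3])
    #   feats = backbone({'image': paddle.randn([2, 3, 640, 640])})
    #   # feats holds the stride-8, 16 and 32 feature maps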
@register
class Res5Head(nn.Layer):
def __init__(self, depth=50):
super(Res5Head, self).__init__()
feat_in, feat_out = [1024, 512]
if depth < 50:
feat_in = 256
na = NameAdapter(self)
block = BottleNeck if depth >= 50 else BasicBlock
self.res5 = Blocks(
block, feat_in, feat_out, count=3, name_adapter=na, stage_num=5)
self.feat_out = feat_out if depth < 50 else feat_out * 4
@property
def out_shape(self):
return [ShapeSpec(
channels=self.feat_out,
stride=16, )]
def forward(self, roi_feat, stage=0):
y = self.res5(roi_feat)
return y
|
a13c98ba70920cc11bd3eae2bad46a5e652ece21
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/third_party/blink/tools/blinkpy/w3c/directory_owners_extractor.py
|
ede6aa5047c25f82c0b7830525cc16e59af90169
|
[
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 8,903
|
py
|
directory_owners_extractor.py
|
# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A limited finder & parser for Chromium OWNERS and DIR_METADATA files.
This module is intended to be used within web_tests/external and is
informative only. For authoritative uses, please rely on `git cl owners`.
For example, it does not support directives other than email addresses.
"""
import collections
import json
import re
from blinkpy.common.memoized import memoized
from blinkpy.common.path_finder import PathFinder
# Format of OWNERS files can be found at //src/third_party/depot_tools/owners.py
# In our use case (under external/wpt), we only process the first enclosing
# OWNERS file for any given path (i.e. always assuming "set noparent"), and we
# ignore "per-file:" lines, "file:" directives, etc.
#
# For DIR_METADATA files, we rely on the dirmd tool from depot_tools to parse
# them into a JSON blob.
# Recognizes 'X@Y' email addresses. Very simplistic. (from owners.py)
BASIC_EMAIL_REGEXP = r'^[\w\-\+\%\.]+\@[\w\-\+\%\.]+$'
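# e.g. 'foo@chromium.org' matches, while directive lines such as
# 'per-file *.html=foo@chromium.org' or 'file://some/OWNERS' do not.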
class DirectoryOwnersExtractor(object):
def __init__(self, host):
self.filesystem = host.filesystem
self.finder = PathFinder(self.filesystem)
self.executive = host.executive
self.owner_map = None
def list_owners(self, changed_files):
"""Looks up the owners for the given set of changed files.
Args:
changed_files: A list of file paths relative to the repository root.
Returns:
A dict mapping tuples of owner email addresses to lists of
owned directories (paths relative to the root of web tests).
"""
email_map = collections.defaultdict(set)
external_root_owners = self.finder.path_from_web_tests(
'external', 'OWNERS')
for relpath in changed_files:
# Try to find the first *non-empty* OWNERS file.
absolute_path = self.finder.path_from_chromium_base(relpath)
owners = None
owners_file = self.find_owners_file(absolute_path)
while owners_file:
owners = self.extract_owners(owners_file)
if owners:
break
# Found an empty OWNERS file. Try again from the parent directory.
absolute_path = self.filesystem.dirname(
self.filesystem.dirname(owners_file))
owners_file = self.find_owners_file(absolute_path)
# Skip web_tests/external/OWNERS.
if not owners or owners_file == external_root_owners:
continue
owned_directory = self.filesystem.dirname(owners_file)
owned_directory_relpath = self.filesystem.relpath(
owned_directory, self.finder.web_tests_dir())
email_map[tuple(owners)].add(owned_directory_relpath)
return {
owners: sorted(owned_directories)
for owners, owned_directories in email_map.items()
}
def find_owners_file(self, start_path):
"""Finds the first enclosing OWNERS file for a given path.
Starting from the given path, walks up the directory tree until the
first OWNERS file is found or web_tests/external is reached.
Args:
start_path: A relative path from the root of the repository, or an
absolute path. The path can be a file or a directory.
Returns:
The absolute path to the first OWNERS file found; None if not found
or if start_path is outside of web_tests/external.
"""
abs_start_path = (start_path if self.filesystem.isabs(start_path) else
self.finder.path_from_chromium_base(start_path))
directory = (abs_start_path if self.filesystem.isdir(abs_start_path)
else self.filesystem.dirname(abs_start_path))
external_root = self.finder.path_from_web_tests('external')
if not directory.startswith(external_root):
return None
# Stop at web_tests, which is the parent of external_root.
while directory != self.finder.web_tests_dir():
owners_file = self.filesystem.join(directory, 'OWNERS')
if self.filesystem.isfile(
self.finder.path_from_chromium_base(owners_file)):
return owners_file
directory = self.filesystem.dirname(directory)
return None
def extract_owners(self, owners_file):
"""Extracts owners from an OWNERS file.
Args:
owners_file: An absolute path to an OWNERS file.
Returns:
A list of valid owners (email addresses).
"""
contents = self._read_text_file(owners_file)
email_regexp = re.compile(BASIC_EMAIL_REGEXP)
addresses = []
for line in contents.splitlines():
line = line.strip()
if email_regexp.match(line):
addresses.append(line)
return addresses
def extract_component(self, metadata_file):
"""Extracts the component from an DIR_METADATA file.
Args:
metadata_file: An absolute path to an DIR_METADATA file.
Returns:
A string, or None if not found.
"""
dir_metadata = self._read_dir_metadata(metadata_file)
if dir_metadata and dir_metadata.component:
return dir_metadata.component
return None
def is_wpt_notify_enabled(self, metadata_file):
"""Checks if the DIR_METADATA file enables WPT-NOTIFY.
Args:
            metadata_file: An absolute path to a DIR_METADATA file.
Returns:
A boolean.
"""
dir_metadata = self._read_dir_metadata(metadata_file)
return dir_metadata and dir_metadata.should_notify
@memoized
def _read_text_file(self, path):
return self.filesystem.read_text_file(path)
@memoized
def _read_dir_metadata(self, path):
"""Read the content from a path.
Args:
path: An absolute path.
Returns:
A WPTDirMetadata object, or None if not found.
"""
dir_path = self.filesystem.dirname(path)
# dirmd starts with an absolute directory path, `dir_path`, traverses all
# parent directories and stops at `root_path` to find the first available DIR_METADATA
# file. `root_path` is the web_tests directory.
json_data = self.executive.run_command([
self.finder.path_from_depot_tools_base('dirmd'),
'read',
'-form', 'sparse',
dir_path,
])
try:
data = json.loads(json_data)
except ValueError:
return None
# Paths in the dirmd output are relative to the repo root.
repo_root = self.finder.path_from_chromium_base()
relative_path = self.filesystem.relpath(dir_path, repo_root)
return WPTDirMetadata(data, relative_path)
class WPTDirMetadata(object):
def __init__(self, data, path):
"""Constructor for WPTDirMetadata.
Args:
data: The output of `dirmd` in _read_dir_metadata; e.g.
{
"dirs":{
"tools/binary_size/libsupersize/testdata/mock_source_directory/base":{
"monorail":{
"project":"chromium",
"component":"Blink>Internal"
},
"teamEmail":"team@chromium.org",
"os":"LINUX",
"wpt":{
"notify":"YES"
}
}
}
}
path: The relative directory path of the DIR_METADATA to the web_tests directory;
see `relative_path` in _read_dir_metadata.
"""
self._data = data
self._path = path
def _get_content(self):
return self._data['dirs'][self._path]
def _is_empty(self):
return len(self._get_content()) == 0
@property
def team_email(self):
if self._is_empty():
return None
# Only returns a single email.
return self._get_content()['teamEmail']
@property
def component(self):
if self._is_empty():
return None
return self._get_content()['monorail']['component']
@property
def should_notify(self):
if self._is_empty():
return None
notify = self._get_content().get('wpt', {}).get('notify')
# The value of `notify` is one of ['TRINARY_UNSPECIFIED', 'YES', 'NO'].
# Assume that users opt out by default; return True only when notify is 'YES'.
return notify == 'YES'
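# Sketch: fed the `data` example from the constructor docstring with the matching
# `path`, this class yields component == 'Blink>Internal',
# team_email == 'team@chromium.org' and should_notify is True ('notify': 'YES').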
|
25bf794bbea0b621f1c4a884cf56bd31edf9e70d
|
eb76f82c474a327759888306910ccf584aee7ba1
|
/ocrd/ocrd/decorators/ocrd_cli_options.py
|
5723471ce74f433f06a5c8097aab5571ca4f53ac
|
[
"Apache-2.0"
] |
permissive
|
OCR-D/core
|
cc2aa388f43823529437924c1d653e48387a180a
|
5d627396a5dfe9abcf11840608c87c94e1353647
|
refs/heads/master
| 2023-08-19T01:39:53.219760
| 2023-08-18T14:37:02
| 2023-08-18T14:37:02
| 112,337,283
| 112
| 26
|
Apache-2.0
| 2023-09-13T16:12:03
| 2017-11-28T13:13:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
ocrd_cli_options.py
|
import click
from click import option, Path
from .parameter_option import parameter_option, parameter_override_option
from .loglevel_option import loglevel_option
from ocrd_network import (
DatabaseParamType,
ServerAddressParamType,
QueueServerParamType
)
def ocrd_cli_options(f):
"""
Implement MP CLI.
Usage::
    from ocrd.utils import ocrd_click_cli
@click.command()
@ocrd_click_cli
def cli(mets_url):
print(mets_url)
"""
    # XXX Note that the `--help` output is statically generated by generate_processor_help
params = [
option('-m', '--mets', default="mets.xml"),
option('-w', '--working-dir'),
# TODO OCR-D/core#274
# option('-I', '--input-file-grp', required=True),
# option('-O', '--output-file-grp', required=True),
option('-I', '--input-file-grp', default='INPUT'),
option('-O', '--output-file-grp', default='OUTPUT'),
option('-g', '--page-id'),
option('--overwrite', is_flag=True, default=False),
option('--profile', is_flag=True, default=False),
option('--profile-file', type=Path(dir_okay=False, writable=True)),
parameter_option,
parameter_override_option,
loglevel_option,
option('--type', 'agent_type', type=click.Choice(['worker', 'server'])),
option('--address', 'agent_address', type=ServerAddressParamType()),
option('--queue', type=QueueServerParamType()),
option('--database', type=DatabaseParamType()),
option('-C', '--show-resource'),
option('-L', '--list-resources', is_flag=True, default=False),
option('-J', '--dump-json', is_flag=True, default=False),
option('-D', '--dump-module-dir', is_flag=True, default=False),
option('-h', '--help', is_flag=True, default=False),
option('-V', '--version', is_flag=True, default=False),
]
for param in params:
param(f)
return f
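# Usage sketch (hypothetical processor CLI; the option names mirror the click
# options registered above):
#
#     @click.command()
#     @ocrd_cli_options
#     def cli(mets, working_dir, input_file_grp, output_file_grp, **kwargs):
#         print(mets, input_file_grp, output_file_grp)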
|
0c05b4f18fcf53e1a404be87041d3826433543de
|
554718851656376ad2bceb282de30459167ffeb2
|
/tests/core/test_state_store.py
|
bb0d0e50980862ffb79a2d7e61c357edc310c83a
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
test_state_store.py
|
# Standard Library
import json
import os
import shutil
# First Party
from smdebug.core.state_store import StateStore
def setup_test():
try:
shutil.rmtree("checkpoints_test_dir/")
    except OSError:
pass
# create the checkpoints directory
os.mkdir("checkpoints_test_dir/")
dir_path = os.path.abspath("checkpoints_test_dir")
# create the config file and set the corresponding environment variable.
mock_config = {"LocalPath": dir_path}
with open("mock_config.json", "w") as f:
json.dump(mock_config, f)
os.environ["CHECKPOINT_CONFIG_FILE_PATH"] = os.path.abspath("mock_config.json")
# create the metadata file inside the checkpoints directory.
mock_metadata = [
{
"training-run": "",
"latest-global-step-saved": "",
"latest-global-step-seen": "",
"latest-mode-step": "",
}
]
with open(dir_path + "/metadata.json", "w") as f:
json.dump(mock_metadata, f)
# Write another metadata.json after s3 uploader has uploaded the file.
# SageMaker renames this file metadata.json.sagemaker-uploaded.
with open(dir_path + "/metadata.json.sagemaker-uploaded", "w") as f1:
json.dump(mock_metadata, f1)
return dir_path, os.path.abspath("mock_config.json")
def cleanup(checkpoints_dir_path, config_path):
shutil.rmtree(checkpoints_dir_path)
os.remove(config_path)
def test_is_checkpoint_updated():
s1 = StateStore()
# There is no checkpoint_dir. is_checkpoint_updated should return False.
assert s1.is_checkpoint_updated() is False
# call setup_test to create checkpoints_dir and metadata file.
checkpoints_dir_path, config_path = setup_test()
s2 = StateStore()
# checkpoints_dir only has metadata.json. So no checkpoints file was created or updated. It should return false.
assert s2.is_checkpoint_updated() is False
s2.update_state("test-state1")
# checkpoints_dir still has only metadata.json. It should return false.
assert s2.is_checkpoint_updated() is False
os.mkdir(s2._checkpoint_dir + "/subdir1")
with open(s2._checkpoint_dir + "/subdir1/checkpoint_test1.txt", "w") as f:
f.write("checkpoint-test-string-1")
f.flush()
os.fsync(f)
# the checkpoint update time is greater than _checkpoint_update_timestamp. is_checkpoint_updated should return true.
assert s2.is_checkpoint_updated()
s2.update_state("test-state2")
    # the state_file has been updated. The latest checkpoint update time is less than _checkpoint_update_timestamp.
    # is_checkpoint_updated should return false.
assert s2.is_checkpoint_updated() is False
with open(s2._checkpoint_dir + "/subdir1/checkpoint_test1.txt", "a") as f:
f.write("checkpoint-test-string-2")
f.flush()
os.fsync(f)
# A checkpoint file has been updated. The checkpoint update time is greater than _checkpoint_update_timestamp.
# is_checkpoint_updated should return true.
assert s2.is_checkpoint_updated()
s2.update_state("test-state3")
os.mkdir(s2._checkpoint_dir + "/subdir2")
with open(s2._checkpoint_dir + "/subdir2/checkpoint_test2.txt", "w") as f:
f.write("checkpoint-test-string-3")
f.flush()
os.fsync(f)
# A new checkpoint file has been created. The checkpoint update time is greater than _checkpoint_update_timestamp.
# is_checkpoint_updated should return true.
assert s2.is_checkpoint_updated()
cleanup(s2._checkpoint_dir, config_path)
|
40c54211850af6316bbfc5b7676be06b9bf7188a
|
6c4518d19073edefa988253ab978dce804a86fd0
|
/tests/api_tests/message_format_tests/condenser_api_tests/test_get_witness_schedule.py
|
dc693008b4071a352102d51b0000eb109c4ce4a4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
openhive-network/hive
|
9c975d7f27729424306ae46a8971a1cb50d9bade
|
faa8b1d33aead9e555b98adb78a5183634d9f8f5
|
refs/heads/master
| 2023-08-08T10:29:51.616373
| 2023-04-04T22:02:30
| 2023-04-05T16:48:29
| 248,639,972
| 348
| 112
|
NOASSERTION
| 2021-08-28T22:34:07
| 2020-03-20T01:18:32
|
C++
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
test_get_witness_schedule.py
|
from hive_local_tools import run_for
@run_for('testnet', 'mainnet_5m', 'live_mainnet')
def test_get_witness_schedule(node):
node.api.condenser.get_witness_schedule()
@run_for('testnet', 'mainnet_5m', 'live_mainnet')
def test_get_witness_schedule_current(node):
node.api.condenser.get_witness_schedule(False)
@run_for('testnet')
def test_get_witness_schedule_future(node):
node.api.condenser.get_witness_schedule(True)
|
7fd3a88cb3ee9499ab90ebf44232b28f6af49cb3
|
7e6e932fc366be0258d4ff408b59c1e752fa149e
|
/adafruit_ble/services/nordic.py
|
569ace9ccff78cf22b161913baf6a78230401a8d
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_CircuitPython_BLE
|
64ae4dacee391c4be8e81f8758db777173b555ad
|
f32e7b953ec93f71fcb292074b6d25c7c4355a88
|
refs/heads/main
| 2023-05-29T17:28:15.578870
| 2023-05-14T17:00:32
| 2023-05-24T01:17:19
| 163,691,781
| 114
| 61
|
MIT
| 2023-04-04T15:44:02
| 2018-12-31T19:05:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
nordic.py
|
# SPDX-FileCopyrightText: 2019 Dan Halbert for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`nordic`
====================================================
This module provides Services used by Nordic Semiconductors.
"""
from __future__ import annotations
from . import Service
from ..uuid import VendorUUID
from ..characteristics.stream import StreamOut, StreamIn
try:
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
from circuitpython_typing import WriteableBuffer, ReadableBuffer
import _bleio
except ImportError:
pass
__version__ = "0.0.0+auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE.git"
class UARTService(Service):
"""
Provide UART-like functionality via the Nordic NUS service.
See ``examples/ble_uart_echo_test.py`` for a usage example.
"""
# pylint: disable=no-member
uuid = VendorUUID("6E400001-B5A3-F393-E0A9-E50E24DCCA9E")
_server_tx = StreamOut(
uuid=VendorUUID("6E400003-B5A3-F393-E0A9-E50E24DCCA9E"),
timeout=1.0,
buffer_size=64,
)
_server_rx = StreamIn(
uuid=VendorUUID("6E400002-B5A3-F393-E0A9-E50E24DCCA9E"),
timeout=1.0,
buffer_size=64,
)
def __init__(self, service: Optional[_bleio.Service] = None) -> None:
super().__init__(service=service)
self.connectable = True
if not service:
self._rx = self._server_rx
self._tx = self._server_tx
else:
# If we're a client then swap the characteristics we use.
self._tx = self._server_rx
self._rx = self._server_tx
def read(self, nbytes: Optional[int] = None) -> Optional[bytes]:
"""
Read characters. If ``nbytes`` is specified then read at most that many bytes.
Otherwise, read everything that arrives until the connection times out.
Providing the number of bytes expected is highly recommended because it will be faster.
:return: Data read
:rtype: bytes or None
"""
return self._rx.read(nbytes)
def readinto(
self, buf: WriteableBuffer, nbytes: Optional[int] = None
) -> Optional[int]:
"""
Read bytes into the ``buf``. If ``nbytes`` is specified then read at most
that many bytes. Otherwise, read at most ``len(buf)`` bytes.
:return: number of bytes read and stored into ``buf``
:rtype: int or None (on a non-blocking error)
"""
return self._rx.readinto(buf, nbytes)
def readline(self) -> Optional[bytes]:
"""
Read a line, ending in a newline character.
:return: the line read
:rtype: bytes or None
"""
return self._rx.readline()
@property
def in_waiting(self) -> int:
"""The number of bytes in the input buffer, available to be read."""
return self._rx.in_waiting
def reset_input_buffer(self) -> None:
"""Discard any unread characters in the input buffer."""
self._rx.reset_input_buffer()
def write(self, buf: ReadableBuffer) -> None:
"""Write a buffer of bytes."""
self._tx.write(buf)
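# Minimal echo sketch (assumes a CircuitPython board with `_bleio`; loosely
# adapted from the ble_uart_echo_test.py example referenced above):
#
#     from adafruit_ble import BLERadio
#     from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
#     uart = UARTService()
#     ble = BLERadio()
#     ble.start_advertising(ProvideServicesAdvertisement(uart))
#     while True:
#         if ble.connected and uart.in_waiting:
#             uart.write(uart.read(uart.in_waiting))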
|
56f7ea1f140e94d89615cdc941941397019d0243
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/23_设计类/pandas/数据操作/177. 第N高的薪水.py
|
f68efa011a8095b9c3a632d20866764a65692d76
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
177. 第N高的薪水.py
|
import pandas as pd
# Query the nth highest salary in the Employee table. If there is no nth
# highest salary, the query should return null.
# Employee table:
# +----+--------+
# | id | salary |
# +----+--------+
# | 1 | 100 |
# | 2 | 200 |
# | 3 | 300 |
# +----+--------+
# n = 2
#
# Output:
# +------------------------+
# | getNthHighestSalary(2) |
# +------------------------+
# | 200 |
# +------------------------+
def nth_highest_salary(employee: pd.DataFrame, N: int) -> pd.DataFrame:
df = employee[["salary"]].drop_duplicates()
if len(df) < N:
return pd.DataFrame({f"getNthHighestSalary({N})": [None]})
return df.sort_values(by=["salary"], ascending=False).iloc[N - 1 : N]
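# Quick check against the example above (a sketch):
#   employee = pd.DataFrame({"id": [1, 2, 3], "salary": [100, 200, 300]})
#   nth_highest_salary(employee, 2)  # -> one-row frame with salary 200
#   nth_highest_salary(employee, 4)  # -> {"getNthHighestSalary(4)": [None]}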
|
ffa8e27952a574214876b1303a1043f300eea0a7
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/enumerator_test.py
|
6d7658ddaf12840efa2ad3695875db2da6ab2ebc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,415
|
py
|
enumerator_test.py
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.estimator.enumeration.brute_force_enumerator import BruteForceEnumerator
from dace.transformation.estimator.enumeration.connected_enumerator import ConnectedEnumerator
import dace
import numpy as np
import pytest
from dace.transformation.estimator import GreedyEnumerator
from dace.transformation.subgraph.composite import CompositeFusion
from dace.sdfg.graph import SubgraphView
from dace.transformation.dataflow.reduce_expansion import ReduceExpansion
W = dace.symbol('W')
H = dace.symbol('H')
B = dace.symbol('B')
@dace.program
def p1(in1: dace.float32[W, H, B], in2: dace.float32[W, H], out: dace.float32[W, H]):
tmp1 = np.ndarray([W, H, B], dtype=dace.float32)
for i, j, k in dace.map[0:W, 0:H, 0:B]:
with dace.tasklet:
a << in1[i, j, k]
b << in2[i, j]
c >> tmp1[i, j, k]
c = a + b * 2
tmp2 = np.ndarray([W, H, B], dtype=dace.float32)
#tmp3 = np.ndarray([W, H], dtype=dace.float32)
for i, j, k in dace.map[0:W, 0:H, 0:B]:
with dace.tasklet:
a << tmp1[i, j, k]
c >> tmp2[i, j, k]
c = 3 * a
tmp3 = dace.reduce(lambda x, y: x + y, tmp1, axis=2, identity=0)
tmp4 = dace.reduce(lambda x, y: x + y, tmp2, axis=2, identity=0)
for i, j in dace.map[0:W, 0:H]:
with dace.tasklet:
a << tmp3[i, j]
b << tmp4[i, j]
c >> out[i, j]
c = a * 2 + b * 3 + 1
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_greedy(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.simplify()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion()
composite.setup_match(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = GreedyEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 1
else:
assert len(result) == 2
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_connected(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.simplify()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion()
composite.setup_match(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = ConnectedEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 14
else:
assert len(result) == 4
@pytest.mark.parametrize(["map_splits"], [[True], [False]])
def test_brute_force(map_splits):
# Test diamond graph structure and ensure topologically correct enumeration
w = 30
h = 30
b = 20
A1 = np.random.rand(w, h, b).astype(np.float32)
A2 = np.random.rand(w, h).astype(np.float32)
ret = np.zeros([w, h], dtype=np.float32)
sdfg = p1.to_sdfg()
sdfg.simplify()
graph = sdfg.nodes()[0]
sdfg.apply_transformations_repeated(ReduceExpansion)
subgraph = SubgraphView(graph, graph.nodes())
composite = CompositeFusion()
composite.setup_match(subgraph)
composite.expansion_split = map_splits
cf = lambda sdfg, subgraph: composite.can_be_applied(sdfg, subgraph)
enum = BruteForceEnumerator(sdfg, graph, subgraph, cf)
result = enum.list()
if map_splits:
assert len(result) == 15
else:
assert len(result) == 5
if __name__ == "__main__":
test_greedy(True)
test_greedy(False)
test_connected(True)
test_connected(False)
test_brute_force(True)
test_brute_force(False)
|
37314fceb32fea414bd2f7fabbd6b4ddcd1a617d
|
f4ceb49e5c7ff44964364a24838cb8049a9e82b1
|
/pangres/tests/test_yield_chunks.py
|
216fd54c277ca536ca7f3a5038d9315bb202cf7d
|
[
"Unlicense"
] |
permissive
|
ThibTrip/pangres
|
b71eea437b3d5fd38bd8d6c837f1fc14d68d3552
|
d78296ef6320b89372706fb98251e09ae914f0f7
|
refs/heads/master
| 2023-09-01T08:26:25.080628
| 2023-04-07T19:53:18
| 2023-04-07T19:53:18
| 237,447,411
| 204
| 16
|
Unlicense
| 2023-08-22T21:30:10
| 2020-01-31T14:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,793
|
py
|
test_yield_chunks.py
|
#!/usr/bin/env python
# coding: utf-8
# +
"""
This module tests we can get information back from
the upserted chunks when the parameter `yield_chunks`
is True. It also checks the integrity of the data.
"""
import math
import pandas as pd
from sqlalchemy import INT
# local imports
from pangres import aupsert, upsert
from pangres.examples import _TestsExampleTable
from pangres.tests.conftest import (adrop_table_between_tests, drop_table_between_tests,
sync_or_async_test, TableNames)
# -
# # Sync and async variants for tests
#
# (`run_test_foo`|`run_test_foo_async`) -> `test_foo`
# ## Insert values one by one
# +
@drop_table_between_tests(table_name=TableNames.WITH_YIELD)
def run_test_get_nb_rows(engine, schema):
# config
table_name = TableNames.WITH_YIELD
nb_rows, chunksize = 20, 3
nb_last_chunk = nb_rows % chunksize
nb_chunks = math.ceil(nb_rows / chunksize)
# MySQL does not want flexible text length in indices/PK
df = _TestsExampleTable.create_example_df(nb_rows=nb_rows)
# iterate over upsert results
# make sure we can extract the number of updated rows and that it is correct
iterator = upsert(con=engine, df=df, table_name=table_name, if_row_exists='update',
schema=schema, chunksize=chunksize, yield_chunks=True)
for ix, result in enumerate(iterator):
assert result.rowcount == (chunksize if ix != (nb_chunks - 1) else nb_last_chunk)
# verify the inserted data is as expected
# we sort the index for MySQL
df_db = _TestsExampleTable.read_from_db(engine=engine, schema=schema, table_name=table_name)
pd.testing.assert_frame_equal(df.sort_index(), df_db.sort_index())
@adrop_table_between_tests(table_name=TableNames.WITH_YIELD)
async def run_test_get_nb_rows_async(engine, schema):
# config
table_name = TableNames.WITH_YIELD
nb_rows, chunksize = 20, 3
nb_last_chunk = nb_rows % chunksize
nb_chunks = math.ceil(nb_rows / chunksize)
# MySQL does not want flexible text length in indices/PK
df = _TestsExampleTable.create_example_df(nb_rows=nb_rows)
# iterate over upsert results
# make sure we can extract the number of updated rows and that it is correct
async_gen = await aupsert(con=engine, df=df, table_name=table_name, if_row_exists='update',
schema=schema, chunksize=chunksize, yield_chunks=True)
# unlike the equivalent synchronous test, enumerate(async_generator) will not work
ix = 0
async for result in async_gen:
assert result.rowcount == (chunksize if ix != (nb_chunks - 1) else nb_last_chunk)
ix += 1
# verify the inserted data is as expected
# we sort the index for MySQL
df_db = await _TestsExampleTable.aread_from_db(engine=engine, schema=schema, table_name=table_name)
pd.testing.assert_frame_equal(df.sort_index(), df_db.sort_index())
# -
# ## Test of an empty DataFrame
# +
@drop_table_between_tests(table_name=TableNames.WITH_YIELD_EMPTY)
def run_test_yield_empty_df(engine, schema):
df = pd.DataFrame({'id': [], 'value': []}).set_index('id')
# we should get an empty generator back
iterator = upsert(con=engine, df=df, table_name=TableNames.WITH_YIELD_EMPTY, if_row_exists='update',
schema=schema, dtype={'id': INT, 'value': INT}, yield_chunks=True)
# the for loop should never run because the generator should be empty
for result in iterator:
raise AssertionError('Expected the generator returned by upsert '
'with an empty df to be empty') # pragma: no cover
@adrop_table_between_tests(table_name=TableNames.WITH_YIELD_EMPTY)
async def run_test_yield_empty_df_async(engine, schema):
df = pd.DataFrame({'id': [], 'value': []}).set_index('id')
# we should get an empty generator back
async_gen = await aupsert(con=engine, df=df, table_name=TableNames.WITH_YIELD_EMPTY, if_row_exists='update',
schema=schema, dtype={'id': INT, 'value': INT}, yield_chunks=True)
# the for loop should never run because the generator should be empty
async for result in async_gen:
raise AssertionError('Expected the generator returned by aupsert '
'with an empty df to be empty') # pragma: no cover
# -
# # Actual tests
# +
def test_get_nb_rows(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_get_nb_rows_async,
f_sync=run_test_get_nb_rows)
def test_yield_empty_df(engine, schema):
sync_or_async_test(engine=engine, schema=schema,
f_async=run_test_yield_empty_df_async,
f_sync=run_test_yield_empty_df)
|
2926bd2806654ce84f6b2754c7fc6804e2d73136
|
5511b2316df99a0d5e0fd3d64de964da911ec372
|
/sdk/python/pulumi_eks/node_group_security_group.py
|
4902a078650ba7ae6066c9fae1be6cb307518cd0
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-eks
|
b07299454f13beb4e06bfbe1a7a82c8199c564ad
|
7febe52e349208f0d715bcb8fcf002ac0d3ef8a4
|
refs/heads/master
| 2023-09-01T08:07:40.639070
| 2023-08-22T13:54:04
| 2023-08-22T13:54:04
| 145,051,421
| 150
| 78
|
Apache-2.0
| 2023-09-13T20:15:37
| 2018-08-17T00:09:55
|
Java
|
UTF-8
|
Python
| false
| false
| 8,405
|
py
|
node_group_security_group.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_aws
__all__ = ['NodeGroupSecurityGroupArgs', 'NodeGroupSecurityGroup']
@pulumi.input_type
class NodeGroupSecurityGroupArgs:
def __init__(__self__, *,
cluster_security_group: pulumi.Input['pulumi_aws.ec2.SecurityGroup'],
eks_cluster: pulumi.Input['pulumi_aws.eks.Cluster'],
vpc_id: pulumi.Input[str],
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a NodeGroupSecurityGroup resource.
:param pulumi.Input['pulumi_aws.ec2.SecurityGroup'] cluster_security_group: The security group associated with the EKS cluster.
:param pulumi.Input['pulumi_aws.eks.Cluster'] eks_cluster: The EKS cluster associated with the worker node group
:param pulumi.Input[str] vpc_id: The VPC in which to create the worker node group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of tags to apply to this security group.
"""
pulumi.set(__self__, "cluster_security_group", cluster_security_group)
pulumi.set(__self__, "eks_cluster", eks_cluster)
pulumi.set(__self__, "vpc_id", vpc_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="clusterSecurityGroup")
def cluster_security_group(self) -> pulumi.Input['pulumi_aws.ec2.SecurityGroup']:
"""
The security group associated with the EKS cluster.
"""
return pulumi.get(self, "cluster_security_group")
@cluster_security_group.setter
def cluster_security_group(self, value: pulumi.Input['pulumi_aws.ec2.SecurityGroup']):
pulumi.set(self, "cluster_security_group", value)
@property
@pulumi.getter(name="eksCluster")
def eks_cluster(self) -> pulumi.Input['pulumi_aws.eks.Cluster']:
"""
The EKS cluster associated with the worker node group
"""
return pulumi.get(self, "eks_cluster")
@eks_cluster.setter
def eks_cluster(self, value: pulumi.Input['pulumi_aws.eks.Cluster']):
pulumi.set(self, "eks_cluster", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The VPC in which to create the worker node group.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value mapping of tags to apply to this security group.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class NodeGroupSecurityGroup(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_security_group: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']] = None,
eks_cluster: Optional[pulumi.Input['pulumi_aws.eks.Cluster']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
NodeGroupSecurityGroup is a component that wraps creating a security group for node groups with the default ingress & egress rules required to connect and work with the EKS cluster security group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input['pulumi_aws.ec2.SecurityGroup'] cluster_security_group: The security group associated with the EKS cluster.
:param pulumi.Input['pulumi_aws.eks.Cluster'] eks_cluster: The EKS cluster associated with the worker node group
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of tags to apply to this security group.
:param pulumi.Input[str] vpc_id: The VPC in which to create the worker node group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NodeGroupSecurityGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
NodeGroupSecurityGroup is a component that wraps creating a security group for node groups with the default ingress & egress rules required to connect and work with the EKS cluster security group.
:param str resource_name: The name of the resource.
:param NodeGroupSecurityGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NodeGroupSecurityGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_security_group: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']] = None,
eks_cluster: Optional[pulumi.Input['pulumi_aws.eks.Cluster']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NodeGroupSecurityGroupArgs.__new__(NodeGroupSecurityGroupArgs)
if cluster_security_group is None and not opts.urn:
raise TypeError("Missing required property 'cluster_security_group'")
__props__.__dict__["cluster_security_group"] = cluster_security_group
if eks_cluster is None and not opts.urn:
raise TypeError("Missing required property 'eks_cluster'")
__props__.__dict__["eks_cluster"] = eks_cluster
__props__.__dict__["tags"] = tags
if vpc_id is None and not opts.urn:
raise TypeError("Missing required property 'vpc_id'")
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["security_group"] = None
__props__.__dict__["security_group_rule"] = None
super(NodeGroupSecurityGroup, __self__).__init__(
'eks:index:NodeGroupSecurityGroup',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="securityGroup")
def security_group(self) -> pulumi.Output['pulumi_aws.ec2.SecurityGroup']:
"""
The security group for node groups with the default ingress & egress rules required to connect and work with the EKS cluster security group.
"""
return pulumi.get(self, "security_group")
@property
@pulumi.getter(name="securityGroupRule")
def security_group_rule(self) -> pulumi.Output['pulumi_aws.ec2.SecurityGroupRule']:
"""
The EKS cluster ingress rule.
"""
return pulumi.get(self, "security_group_rule")
|
566916dc8603d8057dde6d75fd3f8d543d32766a
|
2e3cbdf0b30ad85a049622a5b862976eb59a1730
|
/fireworks/user_objects/firetasks/tests/test_filepad_tasks.py
|
9be33fe2f27202f4b82ed627b07b3a1f65623ef0
|
[
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"LicenseRef-scancode-hdf5"
] |
permissive
|
materialsproject/fireworks
|
dc754122374ffce4859b3418a40fc8796879c0e2
|
579bcf411196ce0bebb4f04ccd2410c091c966cf
|
refs/heads/main
| 2023-08-19T00:18:44.331744
| 2023-08-14T00:43:32
| 2023-08-14T00:44:39
| 7,507,548
| 298
| 195
|
NOASSERTION
| 2023-09-04T08:24:47
| 2013-01-08T19:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 10,385
|
py
|
test_filepad_tasks.py
|
__author__ = "Kiran Mathew, Johannes Hoermann"
import os
import unittest
from ruamel.yaml import YAML
from fireworks.user_objects.firetasks.filepad_tasks import (
AddFilesTask,
DeleteFilesTask,
GetFilesByQueryTask,
GetFilesTask,
)
from fireworks.utilities.filepad import FilePad
module_dir = os.path.abspath(os.path.dirname(__file__))
class FilePadTasksTest(unittest.TestCase):
def setUp(self):
self.paths = [os.path.join(module_dir, "write.yaml"), os.path.join(module_dir, "delete.yaml")]
self.identifiers = ["write", "delete"]
self.fp = FilePad.auto_load()
def test_addfilestask_run(self):
t = AddFilesTask(paths=self.paths, identifiers=self.identifiers)
t.run_task({})
write_file_contents, _ = self.fp.get_file("write")
with open(self.paths[0]) as f:
assert write_file_contents == f.read().encode()
del_file_contents, _ = self.fp.get_file("delete")
with open(self.paths[1]) as f:
assert del_file_contents == f.read().encode()
def test_deletefilestask_run(self):
t = DeleteFilesTask(identifiers=self.identifiers)
t.run_task({})
file_contents, doc = self.fp.get_file("write")
assert file_contents is None
assert doc is None
file_contents, doc = self.fp.get_file("delete")
assert file_contents is None
assert doc is None
def test_getfilestask_run(self):
t = AddFilesTask(paths=self.paths, identifiers=self.identifiers)
t.run_task({})
dest_dir = os.path.abspath(".")
identifiers = ["write"]
new_file_names = ["write_2.yaml"]
t = GetFilesTask(identifiers=identifiers, dest_dir=dest_dir, new_file_names=new_file_names)
t.run_task({})
write_file_contents, _ = self.fp.get_file("write")
with open(os.path.join(dest_dir, new_file_names[0])) as f:
assert write_file_contents == f.read().encode()
os.remove(os.path.join(dest_dir, new_file_names[0]))
def test_getfilesbyquerytask_run(self):
"""Tests querying objects from FilePad by metadata."""
with open("original_test_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["original_test_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
os.remove("original_test_file.txt")
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"}, dest_dir=dest_dir, new_file_names=["queried_test_file.txt"]
)
t.run_task({})
test_file_contents, _ = self.fp.get_file("some_identifier")
with open(os.path.join(dest_dir, "queried_test_file.txt")) as f:
assert test_file_contents == f.read().encode()
os.remove(os.path.join(dest_dir, "queried_test_file.txt"))
def test_getfilesbyquerytask_metafile_run(self):
"""Tests writing metadata to a yaml file."""
with open("original_test_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["original_test_file.txt"], identifiers=["test_identifier"], metadata={"key": "value"})
t.run_task({})
os.remove("original_test_file.txt")
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
meta_file=True,
meta_file_suffix=".meta.yaml",
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
t.run_task({})
with open("queried_test_file.txt.meta.yaml") as f:
yaml = YAML(typ="safe")
metadata = yaml.load(f)
assert metadata["key"] == "value"
os.remove(os.path.join(dest_dir, "queried_test_file.txt"))
os.remove(os.path.join(dest_dir, "queried_test_file.txt.meta.yaml"))
def test_getfilesbyquerytask_ignore_empty_result_run(self):
"""Tests on ignoring empty results from FilePad query."""
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_empty_result=False,
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
t.run_task({})
# test successful if no exception raised
def test_getfilesbyquerytask_raise_empty_result_run(self):
"""Tests on raising exception on empty results from FilePad query."""
dest_dir = os.path.abspath(".")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_empty_result=True,
dest_dir=dest_dir,
new_file_names=["queried_test_file.txt"],
)
with self.assertRaises(ValueError):
t.run_task({})
# test successful if exception raised
def test_getfilesbyquerytask_ignore_degenerate_file_name(self):
"""Tests on ignoring degenerate file name in result from FilePad query."""
with open("degenerate_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write("Some other file with some other content BUT same file name")
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_other_identifier"], metadata={"key": "value"}
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(query={"metadata->key": "value"}, fizzle_degenerate_file_name=False)
t.run_task({})
# test successful if no exception raised
def test_getfilesbyquerytask_raise_degenerate_file_name(self):
"""Tests on raising exception on degenerate file name from FilePad query."""
with open("degenerate_file.txt", "w") as f:
f.write("Some file with some content")
t = AddFilesTask(paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value"})
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write("Some other file with some other content BUT same file name")
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_other_identifier"], metadata={"key": "value"}
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(query={"metadata->key": "value"}, fizzle_degenerate_file_name=True)
with self.assertRaises(ValueError):
t.run_task({})
# test successful if exception raised
def test_getfilesbyquerytask_sort_ascending_name_run(self):
"""Tests on sorting queried files in ascending order."""
file_contents = ["Some file with some content", "Some other file with some other content"]
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[0])
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value", "sort_key": 0}
)
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[-1])
t = AddFilesTask(
paths=["degenerate_file.txt"],
identifiers=["some_other_identifier"],
metadata={"key": "value", "sort_key": 1},
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(
query={"metadata->key": "value"}, fizzle_degenerate_file_name=False, sort_key="sort_key", sort_direction=1
)
t.run_task({})
with open("degenerate_file.txt") as f:
assert file_contents[-1] == f.read()
def test_getfilesbyquerytask_sort_descending_name_run(self):
"""Tests on sorting queried files in descending order."""
file_contents = ["Some file with some content", "Some other file with some other content"]
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[0])
t = AddFilesTask(
paths=["degenerate_file.txt"], identifiers=["some_identifier"], metadata={"key": "value", "sort_key": 10}
)
t.run_task({})
with open("degenerate_file.txt", "w") as f:
f.write(file_contents[-1])
t = AddFilesTask(
paths=["degenerate_file.txt"],
identifiers=["some_other_identifier"],
metadata={"key": "value", "sort_key": 20},
)
t.run_task({})
os.remove("degenerate_file.txt")
t = GetFilesByQueryTask(
query={"metadata->key": "value"},
fizzle_degenerate_file_name=False,
sort_key="metadata.sort_key",
sort_direction=-1,
)
t.run_task({})
with open("degenerate_file.txt") as f:
assert file_contents[0] == f.read()
os.remove("degenerate_file.txt")
def test_addfilesfrompatterntask_run(self):
t = AddFilesTask(paths="*.yaml", directory=module_dir)
t.run_task({})
write_file_contents, _ = self.fp.get_file(self.paths[0])
with open(self.paths[0]) as f:
assert write_file_contents == f.read().encode()
del_file_contents, wdoc = self.fp.get_file(self.paths[1])
with open(self.paths[1]) as f:
assert del_file_contents == f.read().encode()
def tearDown(self):
self.fp.reset()
if __name__ == "__main__":
unittest.main()
|
ec9217bfcbaeca8bb46bfbe1161e1ff26bfd4fa3
|
ea7e87037d0a859250b3b0768fe657ab8520c8be
|
/Python/demos/d06_Algorithms01.py
|
af668bff3cfa9abafc9d2d2bb8e9a5f52e1afd80
|
[
"BSD-3-Clause"
] |
permissive
|
CERN/TIGRE
|
80e99d4a49a2af2ec2248db8be3c48142df37134
|
aa4651538e9bce7d0fee2cd2fcf0baa9fcb2ae19
|
refs/heads/master
| 2023-09-03T15:12:02.100453
| 2023-08-16T09:54:34
| 2023-08-16T09:54:34
| 61,034,131
| 473
| 194
|
BSD-3-Clause
| 2023-09-11T11:41:49
| 2016-06-13T12:22:21
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 2,624
|
py
|
d06_Algorithms01.py
|
##% Demo 6: Algorithms01
#
# In this demo the usage of the FDK is explained
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Ander Biguri
# --------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
#%% Geometry
geo = tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles = np.linspace(0, 2 * np.pi, 100)
# Load head phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections = tigre.Ax(head, geo, angles)
# add noise
noise_projections = CTnoise.add(projections, Poisson=1e5, Gaussian=np.array([0, 10]))
# %% Usage of FDK
# the FDK algorithm has been taken and modified from
# 3D Cone beam CT (CBCT) projection backprojection FDK, iterative reconstruction Matlab examples
# https://www.mathworks.com/matlabcentral/fileexchange/35548-3d-cone-beam-ct--cbct--projection-backprojection-fdk--iterative-reconstruction-matlab-examples
# The algorithm, like any of them, takes 3 mandatory inputs:
# PROJECTIONS: Projection data
# GEOMETRY : Geometry describing the system
# ANGLES : Projection angles
# And has a single optional argument:
# FILTER: filter type applied to the projections. Possible options are
# 'ram_lak' (default)
# 'shepp_logan'
# 'cosine'
# 'hamming'
# 'hann'
# The choice of filter will modify the noise and some discretization
# errors, depending on which is chosen.
#
imgFDK1 = algs.fdk(noise_projections, geo, angles, filter="hann")
imgFDK2 = algs.fdk(noise_projections, geo, angles, filter="ram_lak")
# They look quite the same
tigre.plotimg(np.concatenate([imgFDK1, imgFDK2], axis=1), dim="Z")
# but it can be seen that one has bigger errors in the whole image, while
# the other just in the boundaries
tigre.plotimg(np.concatenate([abs(head - imgFDK1), abs(head - imgFDK2)], axis=1), dim="Z")
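# As a quick quantitative check (a sketch, not part of the original demo):
# RMSE of each reconstruction against the ground-truth phantom.
rmse_hann = np.sqrt(np.mean((head - imgFDK1) ** 2))
rmse_ram_lak = np.sqrt(np.mean((head - imgFDK2) ** 2))
print("RMSE hann: {:.4f}, ram_lak: {:.4f}".format(rmse_hann, rmse_ram_lak))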
|
95d400b14d8a5bb9c944e8fd401eaf12d6a4510b
|
3de3dae722829727edfdd6cc3b67443a69043475
|
/cave/com.raytheon.viz.gfe/localization/gfe/userPython/textUtilities/ModuleAccessor.py
|
68dcc4fb70ef08709c496e7a577cdf24f9af6630
|
[
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] |
permissive
|
Unidata/awips2
|
9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb
|
d76c9f96e6bb06f7239c563203f226e6a6fffeef
|
refs/heads/unidata_18.2.1
| 2023-08-18T13:00:15.110785
| 2023-08-09T06:06:06
| 2023-08-09T06:06:06
| 19,332,079
| 161
| 75
|
NOASSERTION
| 2023-09-13T19:06:40
| 2014-05-01T00:59:04
|
Java
|
UTF-8
|
Python
| false
| false
| 6,455
|
py
|
ModuleAccessor.py
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# ModuleAccessor.py
# Access to the internals of Modules
#
# Author: hansen
# ----------------------------------------------------------------------------
########################################################################
##
# This is a base file that is not intended to be overridden.
##
import types, sys
import traceback
class ModuleAccessor:
# Used to access objects within Modules
def __init__(self, errorCB=None):
# Used for error messages
self.__errorCB = errorCB
def module(self, moduleName, showError=1):
# Return the module with the given name
try:
if sys.modules.has_key(moduleName):
#del sys.modules[moduleName] # this is bad for the automated tests code replacement
return sys.modules[moduleName]
module = __import__(moduleName)
except:
if showError and self.__errorCB is not None:
self.__errorCB("Problem finding or importing module: "
+ moduleName, tracebackFlag=1)
return None
return module
def variables(self, moduleName, variableList, showError=1):
# Return the global variables in the given module
module = self.module(moduleName, showError)
if module is None:
return None
variables = []
for variableName in variableList:
if variableName in module.__dict__.keys():
variables.append(module.__dict__[variableName])
else:
variables.append(None)
return tuple(variables)
def variable(self, moduleName, variableName, showError=1):
# Return the global variable in the given module
module = self.module(moduleName, showError)
if module is None:
return None
if variableName in module.__dict__.keys():
return module.__dict__[variableName]
else:
return None
def classDefinition(self, moduleName, className):
# Returns the class in the given module
if className is None:
return None, None
module = self.module(moduleName, 1)
if module is None:
return None, None
# Look for Class
classDefinition = self.getClassDefinition(module, className)
return module, classDefinition
def getClassDefinition(self, module, className):
# Check for the given class in the module
if className in module.__dict__.keys() and \
type(module.__dict__[className]) is types.ClassType:
return module.__dict__[className]
else:
return None
def getFunctions(self, moduleName, functionNames, className=None,
classArgs=None, classOnly=0):
# Returns a dictionary containing the executable functions
# Looks first for functions in a class
# If not found, looks for functions in the module itself
# These functions are definitions, not executable instances
module, classDefinition = self.classDefinition(moduleName, className)
if module is None:
return None, None, None
classInstance = None
if classDefinition is not None:
# Create the callable class instance and set up the
# functions
classInstance = classDefinition(classArgs)
functionDict = self.getClassFunctions(
classInstance, functionNames)
elif not classOnly == 1:
# Look for a Function with same name as module
functionDict = self.getModuleFunctions(
module, functionNames)
else:
return None, None, None
return module, classInstance, functionDict
def getClassFunctions(self, classInstance, functionNames):
# Returns a dictionary containing the functions specified
# in the given classInstance
functionDict = {}
for functionName in functionNames:
functionDict[functionName] = getattr(classInstance,
functionName, None)
return functionDict
def getModuleFunctions(self, module, functionNames):
# Returns a dictionary containing the functions specified
# for the given module
functionDict = {}
for functionName in functionNames:
if functionName in module.__dict__.keys():
result = module.__dict__[functionName]
else:
result = None
functionDict[functionName] = result
return functionDict
def callMethod(self, method, argCallback, classInstance=None):
# Get arguments and call the method
if method is None:
return None
elif hasattr(method, 'im_func'): # It is a user defined method
co = method.im_func.func_code
elif hasattr(method, 'func_code'): # It is a user defined function
co = method.func_code
else: # Don't know what it is
return None
# Set up variables and values for arguments in args
argValueList = argCallback(co.co_varnames[:co.co_argcount], [])
if type(argValueList) is not types.ListType:
error = argValueList
return error
# Format the arguments and call the method
return method(*argValueList)
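# Illustrative use (a sketch; the module, class, and callback names below are
# hypothetical):
# accessor = ModuleAccessor(errorCB=None)
# module, instance, funcs = accessor.getFunctions("MyProduct", ["execute"],
#                                                 className="MyProduct",
#                                                 classArgs=None)
# if funcs is not None and funcs.get("execute") is not None:
#     result = accessor.callMethod(funcs["execute"], myArgCallback, instance)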
|
67512510274713a41f9d405e8588d813b8f7a1c1
|
e26d9f2788dac5e21ddb0620b3bfee5777602a2b
|
/GayEmpire.bundle/Contents/Libraries/Shared/unidecode/x00b.py
|
294ea45b8aac3dfe7164a4002e6a3cc8838065f6
|
[
"MIT"
] |
permissive
|
CodyBerenson/PGMA-Modernized
|
c0ea594d7de787aeec7365f0c05d2529032f11be
|
75b3507a73b978fc62f1534fa7fdb0261c91bd3a
|
refs/heads/master
| 2023-09-03T11:09:40.487806
| 2023-08-13T17:33:57
| 2023-08-13T17:33:57
| 247,320,844
| 141
| 37
|
MIT
| 2023-09-14T01:33:53
| 2020-03-14T17:20:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,019
|
py
|
x00b.py
|
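# Transliteration table for Unicode code points U+0B00-U+0BFF (the Oriya and
# Tamil blocks); the tuple is indexed by the low byte of the code point, and
# each entry is an ASCII approximation or None where none is defined.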
data = (
None, # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
None, # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
None, # 0x0d
None, # 0x0e
'e', # 0x0f
'ai', # 0x10
None, # 0x11
None, # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
None, # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
None, # 0x31
'l', # 0x32
'll', # 0x33
None, # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
None, # 0x3a
None, # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
None, # 0x44
None, # 0x45
None, # 0x46
'e', # 0x47
'ai', # 0x48
None, # 0x49
None, # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
None, # 0x4e
None, # 0x4f
None, # 0x50
None, # 0x51
None, # 0x52
None, # 0x53
None, # 0x54
None, # 0x55
'+', # 0x56
'+', # 0x57
None, # 0x58
None, # 0x59
None, # 0x5a
None, # 0x5b
'rr', # 0x5c
'rh', # 0x5d
None, # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
None, # 0x62
None, # 0x63
None, # 0x64
None, # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
None, # 0x71
None, # 0x72
None, # 0x73
None, # 0x74
None, # 0x75
None, # 0x76
None, # 0x77
None, # 0x78
None, # 0x79
None, # 0x7a
None, # 0x7b
None, # 0x7c
None, # 0x7d
None, # 0x7e
None, # 0x7f
None, # 0x80
None, # 0x81
'N', # 0x82
'H', # 0x83
None, # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
None, # 0x8b
None, # 0x8c
None, # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
None, # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
None, # 0x96
None, # 0x97
None, # 0x98
'ng', # 0x99
'c', # 0x9a
None, # 0x9b
'j', # 0x9c
None, # 0x9d
'ny', # 0x9e
'tt', # 0x9f
None, # 0xa0
None, # 0xa1
None, # 0xa2
'nn', # 0xa3
't', # 0xa4
None, # 0xa5
None, # 0xa6
None, # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
None, # 0xab
None, # 0xac
None, # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
None, # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
None, # 0xba
None, # 0xbb
None, # 0xbc
None, # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
None, # 0xc3
None, # 0xc4
None, # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
None, # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
None, # 0xce
None, # 0xcf
None, # 0xd0
None, # 0xd1
None, # 0xd2
None, # 0xd3
None, # 0xd4
None, # 0xd5
None, # 0xd6
'+', # 0xd7
None, # 0xd8
None, # 0xd9
None, # 0xda
None, # 0xdb
None, # 0xdc
None, # 0xdd
None, # 0xde
None, # 0xdf
None, # 0xe0
None, # 0xe1
None, # 0xe2
None, # 0xe3
None, # 0xe4
None, # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
None, # 0xf3
None, # 0xf4
None, # 0xf5
None, # 0xf6
None, # 0xf7
None, # 0xf8
None, # 0xf9
None, # 0xfa
None, # 0xfb
None, # 0xfc
None, # 0xfd
None, # 0xfe
)
|
863982ab891b6c39734e11f28565a59a4c7f9bd3
|
1d363dfbe69b79bc1989251f085060232beb12f5
|
/thermo/mixture.py
|
1c2bdd7779ad9563e284f40940c1cef4cd830ea3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
CalebBell/thermo
|
ec602af2316875692e385287c6010e9f206b1bc3
|
8622fada3614179d4372192e0031b4a206384c93
|
refs/heads/master
| 2023-08-30T05:30:07.552575
| 2023-06-25T01:35:53
| 2023-06-25T01:35:53
| 62,404,647
| 529
| 127
|
MIT
| 2023-08-11T18:31:21
| 2016-07-01T16:04:56
|
Python
|
UTF-8
|
Python
| false
| false
| 124,962
|
py
|
mixture.py
|
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017, 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__all__ = ['Mixture']
from collections import OrderedDict
from chemicals.elements import mass_fractions, mixture_atomic_composition
from chemicals.identifiers import CAS_from_any, mixture_from_any
from chemicals.utils import (
SG,
Joule_Thomson,
Parachor,
R,
SG_to_API,
Vfs_to_zs,
Vm_to_rho,
Z,
isentropic_exponent,
isobaric_expansion,
mixing_simple,
none_and_length_check,
property_mass_to_molar,
property_molar_to_mass,
speed_of_sound,
vapor_mass_quality,
ws_to_zs,
zs_to_Vfs,
zs_to_ws,
)
from chemicals.virial import B_from_Z
from chemicals.volume import ideal_gas
from fluids.core import Bond, Capillary, Grashof, Jakob, Peclet_heat, Prandtl, Reynolds, Weber, nu_mu_converter, thermal_diffusivity
from fluids.numerics import numpy as np
from thermo.chemical import Chemical
from thermo.eos import IG, PR
from thermo.eos_mix import PRMIX
from thermo.heat_capacity import HeatCapacityGasMixture, HeatCapacityLiquidMixture, HeatCapacitySolidMixture
from thermo.interface import SurfaceTensionMixture
from thermo.thermal_conductivity import ThermalConductivityGasMixture, ThermalConductivityLiquidMixture
from thermo.utils import phase_select_property
from thermo.viscosity import ViscosityGasMixture, ViscosityLiquidMixture
from thermo.volume import LINEAR_MISSING_IDEAL, VolumeGasMixture, VolumeLiquidMixture, VolumeSolidMixture
def preprocess_mixture_composition(IDs=None, zs=None, ws=None, Vfls=None,
Vfgs=None, ignore_exceptions=False):
r'''Composition preprocessing function for the :obj:`thermo.mixture.Mixture`
class, as the composition handling had grown large enough to require its own function.
This function accepts the possible ways of specifying composition, parses
and checks them to an extent, and returns the same arguments it receives.
The tasks it performs are as follows:
* Check if the input ID was a string, or a 1-length list, which is one
of the main keys or synonyms retrievable from
:obj:`thermo.identifiers.mixture_from_any`; if it is, take the
composition from that method (weight fractions will be returned).
* If the ID is a string or a 1-length list, set the composition to
be pure (if no other composition was specified).
* If the composition (zs, ws, Vfls, Vfgs) is a list, turn it into a
copy of the list to not change other instances of it.
* If the composition is a numpy array, convert it to a list for greater
speed.
* If the composition is a dict or OrderedDict, take the keys of it
as the identifiers from its keys and the composition as its values.
If no composition has been specified after the above parsing, an exception
is raised.
If multiple ways of specifying composition were used, raise an exception.
If the length of the specified composition is not the same as the number
of identifiers given, an exception is raised.
Note this method does not normalize the composition to sum to one, nor
check that the identifiers are valid.
'''
# Test if the input ID is a string or a 1-length list
if hasattr(IDs, 'strip') or (isinstance(IDs, list) and len(IDs) == 1):
try:
# Assume the name was a pre-defined mixture
mix = mixture_from_any(IDs)
IDs = mix.CASs
ws = mix.ws
except:
if hasattr(IDs, 'strip'):
IDs = [IDs]
zs = [1.0]
elif isinstance(IDs, list) and len(IDs) == 1:
if zs is None and ws is None and Vfls is None and Vfgs is None:
zs = [1.0]
else:
if not ignore_exceptions:
raise Exception('Could not recognize the mixture IDs')
else:
return IDs, zs, ws, Vfls, Vfgs
# Handle numpy array inputs; also turn mutable inputs into copies
if zs is not None:
t = type(zs)
if t == list:
zs = list(zs)
elif t == np.ndarray:
zs = zs.tolist()
elif isinstance(zs, (OrderedDict, dict)):
IDs = list(zs.keys())
zs = list(zs.values())
length_matching = len(zs) == len(IDs)
elif ws is not None:
t = type(ws)
if t == list:
ws = list(ws)
elif t == np.ndarray:
ws = ws.tolist()
elif isinstance(ws, (OrderedDict, dict)):
IDs = list(ws.keys())
ws = list(ws.values())
length_matching = len(ws) == len(IDs)
elif Vfls is not None:
t = type(Vfls)
if t == list:
Vfls = list(Vfls)
elif t == np.ndarray:
Vfls = Vfls.tolist()
elif isinstance(Vfls, (OrderedDict, dict)):
IDs = list(Vfls.keys())
Vfls = list(Vfls.values())
length_matching = len(Vfls) == len(IDs)
elif Vfgs is not None:
t = type(Vfgs)
if t == list:
Vfgs = list(Vfgs)
elif t == np.ndarray:
Vfgs = Vfgs.tolist()
elif isinstance(Vfgs, (OrderedDict, dict)):
IDs = list(Vfgs.keys())
Vfgs = list(Vfgs.values())
length_matching = len(Vfgs) == len(IDs)
else:
if not ignore_exceptions:
raise Exception("One of 'zs', 'ws', 'Vfls', or 'Vfgs' is required to define the mixture")
# Do not do a test on multiple composition inputs in case the user specified
# a composition, plus one was set (it will be zero anyway)
if not ignore_exceptions:
if len(IDs) > 1 and ((zs is not None) + (ws is not None) + (Vfgs is not None) + (Vfls is not None)) > 1:
raise Exception('Multiple different composition arguments were '
"specified; specify only one of the arguments "
"'zs', 'ws', 'Vfls', or 'Vfgs'.")
if not length_matching:
raise Exception('Composition is not the same length as the component identifiers')
return IDs, zs, ws, Vfls, Vfgs
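# Illustrative behaviour of the helper (a sketch; the dict form takes the
# identifiers from the keys and the composition from the values):
# >>> preprocess_mixture_composition(zs={'methane': 0.5, 'ethane': 0.5})
# (['methane', 'ethane'], [0.5, 0.5], None, None, None)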
class Mixture:
'''Creates a Mixture object which contains basic information such as
molecular weight and the structure of the species, as well as thermodynamic
and transport properties as a function of two of the variables temperature,
pressure, vapor fraction, enthalpy, or entropy.
The components of the mixture are specified by the names of the
chemicals; the composition can be specified by providing any one of the
following parameters:
* Mass fractions `ws`
* Mole fractions `zs`
* Liquid volume fractions (based on pure component densities) `Vfls`
* Gas volume fractions (based on pure component densities) `Vfgs`
If volume fractions are provided, by default the pure component volumes
are calculated at the specified `T` and `P`. To use another reference
temperature and pressure specify it as a tuple for the argument `Vf_TP`.
If no thermodynamic conditions are specified, or if only one of T and P
is specified without another thermodynamic variable as well, defaults of
298.15 K and/or 101325 Pa will be used for the missing variables.
Parameters
----------
IDs : list, optional
List of chemical identifiers - names, CAS numbers, SMILES or InChi
strings can all be recognized and may be mixed [-]
zs : list or dict, optional
Mole fractions of all components in the mixture [-]
ws : list or dict, optional
Mass fractions of all components in the mixture [-]
Vfls : list or dict, optional
Volume fractions of all components as a hypothetical liquid phase based
on pure component densities [-]
Vfgs : list or dict, optional
Volume fractions of all components as a hypothetical gas phase based
on pure component densities [-]
T : float, optional
Temperature of the mixture (default 298.15 K), [K]
P : float, optional
Pressure of the mixture (default 101325 Pa) [Pa]
VF : float, optional
Vapor fraction (mole basis) of the mixture, [-]
Hm : float, optional
Molar enthalpy of the mixture, [J/mol]
H : float, optional
Mass enthalpy of the mixture, [J/kg]
Sm : float, optional
Molar entropy of the mixture, [J/mol/K]
S : float, optional
Mass entropy of the mixture, [J/kg/K]
pkg : object
The thermodynamic property package to use for flash calculations;
one of the caloric packages in :obj:`thermo.property_package`;
defaults to the ideal model [-]
Vf_TP : tuple(2, float), optional
The (T, P) at which the volume fractions are specified, [K]
and [Pa]
Attributes
----------
MW : float
Mole-weighted average molecular weight of all chemicals in the mixture,
[g/mol]
IDs : list of str
Names of all the species in the mixture as given in the input, [-]
names : list of str
Names of all the species in the mixture, [-]
CASs : list of str
CAS numbers of all species in the mixture, [-]
MWs : list of float
Molecular weights of all chemicals in the mixture, [g/mol]
Tms : list of float
Melting temperatures of all chemicals in the mixture, [K]
Tbs : list of float
Boiling temperatures of all chemicals in the mixture, [K]
Tcs : list of float
Critical temperatures of all chemicals in the mixture, [K]
Pcs : list of float
Critical pressures of all chemicals in the mixture, [Pa]
Vcs : list of float
Critical volumes of all chemicals in the mixture, [m^3/mol]
Zcs : list of float
Critical compressibilities of all chemicals in the mixture, [-]
rhocs : list of float
Critical densities of all chemicals in the mixture, [kg/m^3]
rhocms : list of float
Critical molar densities of all chemicals in the mixture, [mol/m^3]
omegas : list of float
Acentric factors of all chemicals in the mixture, [-]
StielPolars : list of float
Stiel Polar factors of all chemicals in the mixture,
see :obj:`chemicals.acentric.Stiel_polar_factor` for the definition, [-]
Tts : list of float
Triple temperatures of all chemicals in the mixture, [K]
Pts : list of float
Triple pressures of all chemicals in the mixture, [Pa]
Hfuss : list of float
Enthalpy of fusions of all chemicals in the mixture, [J/kg]
Hfusms : list of float
Molar enthalpy of fusions of all chemicals in the mixture, [J/mol]
Hsubs : list of float
Enthalpy of sublimations of all chemicals in the mixture, [J/kg]
Hsubms : list of float
Molar enthalpy of sublimations of all chemicals in the mixture, [J/mol]
Hfms : list of float
Molar enthalpy of formations of all chemicals in the mixture, [J/mol]
Hfs : list of float
Enthalpy of formations of all chemicals in the mixture, [J/kg]
Gfms : list of float
Molar Gibbs free energies of formation of all chemicals in the mixture,
[J/mol]
Gfs : list of float
Gibbs free energies of formation of all chemicals in the mixture,
[J/kg]
Sfms : list of float
Molar entropy of formation of all chemicals in the mixture,
[J/mol/K]
Sfs : list of float
Entropy of formation of all chemicals in the mixture,
[J/kg/K]
S0ms : list of float
Standard absolute entropies of all chemicals in the mixture,
[J/mol/K]
S0s : list of float
Standard absolute entropies of all chemicals in the mixture,
[J/kg/K]
Hcms : list of float
Molar higher heats of combustions of all chemicals in the mixture,
[J/mol]
Hcs : list of float
Higher heats of combustions of all chemicals in the mixture,
[J/kg]
Hcms_lower : list of float
Molar lower heats of combustions of all chemicals in the mixture,
[J/mol]
Hcs_lower : list of float
Lower heats of combustion of all chemicals in the mixture,
[J/kg]
Tflashs : list of float
Flash points of all chemicals in the mixture, [K]
Tautoignitions : list of float
Autoignition points of all chemicals in the mixture, [K]
LFLs : list of float
Lower flammability limits of the gases in an atmosphere at STP, mole
fractions, [-]
UFLs : list of float
Upper flammability limit of the gases in an atmosphere at STP, mole
fractions, [-]
TWAs : list of list of tuple(quantity, unit)
Time-Weighted Average limits on worker exposure to dangerous chemicals.
STELs : list of tuple(quantity, unit)
Short-term Exposure limits on worker exposure to dangerous chemicals.
Ceilings : list of tuple(quantity, unit)
Ceiling limits on worker exposure to dangerous chemicals.
Skins : list of bool
Whether or not each of the chemicals can be absorbed through the skin.
Carcinogens : list of str or dict
Carcinogen status information for each chemical in the mixture.
Chemicals : list of Chemical instances
Chemical instances used in calculating mixture properties, [-]
dipoles : list of float
Dipole moments of all chemicals in the mixture in debye,
[3.33564095198e-30 coulomb*meter]
Stockmayers : list of float
Lennard-Jones depth of potential-energy minimum over k for all
chemicals in the mixture, [K]
molecular_diameters : list of float
Lennard-Jones molecular diameters of all chemicals in the mixture,
[angstrom]
GWPs : list of float
Global warming potentials (default 100-year outlook) (impact/mass
chemical)/(impact/mass CO2) of all chemicals in the mixture, [-]
ODPs : list of float
Ozone Depletion potentials (impact/mass chemical)/(impact/mass CFC-11),
of all chemicals in the mixture, [-]
logPs : list of float
Octanol-water partition coefficients of all chemicals in the mixture,
[-]
Psat_298s : list of float
Vapor pressure of the chemicals in the mixture at 298.15 K, [Pa]
phase_STPs : list of str
Phase of the chemicals in the mixture at 298.15 K and 101325 Pa; one of
's', 'l', 'g', or 'l/g'.
Vml_Tbs : list of float
Molar volumes of the chemicals in the mixture as liquids at their
normal boiling points, [m^3/mol]
Vml_Tms : list of float
Molar volumes of the chemicals in the mixture as liquids at their
melting points, [m^3/mol]
Vml_STPs : list of float
Molar volume of the chemicals in the mixture as liquids at 298.15 K and
101325 Pa, [m^3/mol]
rhoml_STPs : list of float
Molar densities of the chemicals in the mixture as liquids at 298.15 K
and 101325 Pa, [mol/m^3]
Vmg_STPs : list of float
Molar volume of the chemicals in the mixture as gases at 298.15 K and
101325 Pa, [m^3/mol]
Vms_Tms : list of float
Molar volumes of solid phase at the melting point [m^3/mol]
rhos_Tms : list of float
Mass densities of solid phase at the melting point [kg/m^3]
Hvap_Tbms : list of float
Molar enthalpies of vaporization of the chemicals in the mixture at
their normal boiling points, [J/mol]
Hvap_Tbs : list of float
Mass enthalpies of vaporization of the chemicals in the mixture at
their normal boiling points, [J/kg]
alpha
alphag
alphags
alphal
alphals
A
Am
atom_fractions
atom_fractionss
atomss
Bvirial
charges
Cp
Cpg
Cpgm
Cpgms
Cpgs
Cpl
Cplm
Cplms
Cpls
Cpm
Cps
Cpsm
Cpsms
Cpss
Cvg
Cvgm
Cvgms
Cvgs
economic_statuses
eos
formulas
Hvapms
Hvaps
InChI_Keys
InChIs
isentropic_exponent
isentropic_exponents
isobaric_expansion
isobaric_expansion_g
isobaric_expansion_gs
isobaric_expansion_l
isobaric_expansion_ls
IUPAC_names
JT
JTg
JTgs
JTl
JTls
k
kg
kgs
kl
kls
legal_statuses
mass_fractions
mass_fractionss
mu
mug
mugs
mul
muls
nu
nug
nugs
nul
nuls
permittivites
Pr
Prg
Prgs
Prl
Prls
Psats
PSRK_groups
PubChems
rho
rhog
rhogm
rhogms
rhogm_STP
rhogs
rhog_STP
rhol
rholm
rholms
rholm_STP
rhols
rhol_STP
rhom
rhosms
rhoss
ringss
sigma
sigmas
smiless
solubility_parameters
synonymss
U
Um
UNIFAC_Dortmund_groups
UNIFAC_groups
Vm
Vmg
Vmgs
Vmg_STP
Vml
Vmls
Vml_STP
Vmss
Z
Zg
Zgs
Zg_STP
Zl
Zls
Zl_STP
Zss
Notes
-----
.. warning::
The Mixture class is not designed for high-performance or the ability
to use different thermodynamic models. It is especially limited in its
multiphase support and the ability to solve with specifications other
than temperature and pressure. It is impossible to change constant
properties such as a compound's critical temperature in this interface.
It is recommended to switch over to the :obj:`thermo.flash` interface
which solves those problems and is better positioned to grow. That
interface also requires users to be responsible for their chemical
constants and pure component correlations; while default values can
easily be loaded for most compounds, the user is ultimately responsible
for them.
Examples
--------
Creating Mixture objects:
>>> Mixture(['water', 'ethanol'], Vfls=[.6, .4], T=300, P=1E5)
<Mixture, components=['water', 'ethanol'], mole fractions=[0.8299, 0.1701], T=300.00 K, P=100000 Pa>
For mixtures with large numbers of components, it may be confusing to enter
the composition separate from the names of the chemicals. For that case,
the syntax using dictionaries as follows is supported with any composition
specification:
>>> comp = OrderedDict([('methane', 0.96522),
... ('nitrogen', 0.00259),
... ('carbon dioxide', 0.00596),
... ('ethane', 0.01819),
... ('propane', 0.0046),
... ('isobutane', 0.00098),
... ('butane', 0.00101),
... ('2-methylbutane', 0.00047),
... ('pentane', 0.00032),
... ('hexane', 0.00066)])
>>> m = Mixture(zs=comp)
'''
flashed = True
eos_in_a_box = []
ks = None
Vms = None
rhos = None
xs = None
ys = None
phase = None
V_over_F = None
conductivity = None
Hm = None
H = None
isobaric_expansion_g = None
isobaric_expansion_l = None
T_default = 298.15
P_default = 101325.
autoflash = True # Whether or not to flash on init
def __repr__(self):
txt = f'<Mixture, components={self.names}, mole fractions={[round(i,4) for i in self.zs]}'
# T and P may not be available if a flash has failed
try:
txt += f', T={self.T:.2f} K, P={self.P:.0f} Pa>'
except:
txt += ', thermodynamic conditions unknown>'
return txt
def __init__(self, IDs=None, zs=None, ws=None, Vfls=None, Vfgs=None,
T=None, P=None,
VF=None, H=None, Hm=None, S=None, Sm=None, pkg=None, Vf_TP=(None, None)):
# Perform preprocessing of the mixture composition separately so it
# can be tested on its own
IDs, zs, ws, Vfls, Vfgs = preprocess_mixture_composition(IDs=IDs,
zs=zs, ws=ws,
Vfls=Vfls,
Vfgs=Vfgs)
self.IDs = IDs
self.N = len(IDs)
self.cmps = range(self.N)
T_unsolved = T if T is not None else self.T_default
P_unsolved = P if P is not None else self.P_default
self.Chemicals = [Chemical(ID, P=P_unsolved, T=T_unsolved, autocalc=False) for ID in self.IDs]
# Required for densities for volume fractions before setting fractions
self.set_chemical_constants()
self.set_Chemical_property_objects()
if zs:
self.zs = zs if sum(zs) == 1 else [zi/sum(zs) for zi in zs]
self.ws = zs_to_ws(zs, self.MWs)
elif ws:
self.ws = ws if sum(ws) == 1 else [wi/sum(ws) for wi in ws]
self.zs = ws_to_zs(ws, self.MWs)
elif Vfls or Vfgs:
T_vf, P_vf = Vf_TP
if T_vf is None:
T_vf = T_unsolved
if P_vf is None:
P_vf = P_unsolved
if Vfls:
Vfs = Vfls if sum(Vfls) == 1 else [Vfli/sum(Vfls) for Vfli in Vfls]
VolumeObjects = self.VolumeLiquids
Vms_TP = self.Vmls
else:
Vfs = Vfgs if sum(Vfgs) == 1 else [Vfgi/sum(Vfgs) for Vfgi in Vfgs]
VolumeObjects = self.VolumeGases
#Vms_TP = self.Vmgs
Vms_TP = [ideal_gas(T_vf, P_vf)]*self.N
if (T_vf != T or P_vf != P) and Vfls:
Vms_TP = [i(T_vf, P_vf) for i in VolumeObjects]
self.zs = Vfs_to_zs(Vfs, Vms_TP)
self.ws = zs_to_ws(self.zs, self.MWs)
else:
raise Exception('One of mole fractions `zs`, weight fractions `ws`,'
' pure component liquid volume fractions `Vfls`, or'
' pure component gas volume fractions `Vfgs` must '
'be provided.')
self.MW = mixing_simple(self.zs, self.MWs)
self.set_constant_sources()
self.set_constants()
self.set_TP_sources()
# To preserve backwards compatibility, mixtures with no other state vars
# specified will have their T and P initialized to the values of
# T_default and P_default (but only if the values VF, Hm, H, Sm, S are
# None)
non_TP_state_vars = sum(i is not None for i in [VF, Hm, H, Sm, S])
if non_TP_state_vars == 0:
if T is None:
T = self.T_default
if P is None:
P = self.P_default
self.set_property_package(pkg=pkg)
if self.autoflash:
self.flash_caloric(T=T, P=P, VF=VF, Hm=Hm, Sm=Sm, H=H, S=S)
def set_chemical_constants(self):
r'''Basic method which retrieves and sets constants of chemicals to be
accessible as lists from a Mixture object. This gets called
automatically on the instantiation of a new Mixture instance.
'''
self.names = [i.name for i in self.Chemicals]
self.MWs = MWs = [i.MW for i in self.Chemicals]
self.CASs = [i.CAS for i in self.Chemicals]
# Set lists of everything set by Chemical.set_constants
self.Tms = [i.Tm for i in self.Chemicals]
self.Tbs = [i.Tb for i in self.Chemicals]
# Critical Point
self.Tcs = [i.Tc for i in self.Chemicals]
self.Pcs = [i.Pc for i in self.Chemicals]
self.Vcs = [i.Vc for i in self.Chemicals]
self.omegas = [i.omega for i in self.Chemicals]
self.StielPolars = [i.StielPolar for i in self.Chemicals]
self.Zcs = [i.Zc for i in self.Chemicals]
self.rhocs = [i.rhoc for i in self.Chemicals]
self.rhocms = [i.rhocm for i in self.Chemicals]
# Triple point
self.Pts = [i.Pt for i in self.Chemicals]
self.Tts = [i.Tt for i in self.Chemicals]
# Enthalpy
self.Hfuss = [i.Hfus for i in self.Chemicals]
self.Hsubs = [i.Hsub for i in self.Chemicals]
self.Hfusms = [i.Hfusm for i in self.Chemicals]
self.Hsubms = [i.Hsubm for i in self.Chemicals]
# Chemistry - standard state
self.Hfms = [i.Hfm for i in self.Chemicals]
self.Hfs = [i.Hf for i in self.Chemicals]
self.S0ms = [i.S0m for i in self.Chemicals]
self.S0s = [i.S0 for i in self.Chemicals]
self.Gfms = [i.Gfm for i in self.Chemicals]
self.Gfs = [i.Gf for i in self.Chemicals]
self.Sfms = [i.Sfm for i in self.Chemicals]
self.Sfs = [i.Sf for i in self.Chemicals]
# Ideal gas state
self.Hfgms = [i.Hfgm for i in self.Chemicals]
self.Hfgs = [i.Hfg for i in self.Chemicals]
self.S0gms = [i.S0gm for i in self.Chemicals]
self.S0gs = [i.S0g for i in self.Chemicals]
self.Gfgms = [i.Gfgm for i in self.Chemicals]
self.Gfgs = [i.Gfg for i in self.Chemicals]
self.Sfgms = [i.Sfgm for i in self.Chemicals]
self.Sfgs = [i.Sfg for i in self.Chemicals]
# Combustion
self.Hcms = [i.Hcm for i in self.Chemicals]
self.Hcs = [i.Hc for i in self.Chemicals]
self.Hcms_lower = [i.Hcm_lower for i in self.Chemicals]
self.Hcs_lower = [i.Hc_lower for i in self.Chemicals]
self.Hcgms = [i.Hcgm for i in self.Chemicals]
self.Hcgs = [i.Hcg for i in self.Chemicals]
self.Hcgms_lower = [i.Hcgm_lower for i in self.Chemicals]
self.Hcgs_lower = [i.Hcg_lower for i in self.Chemicals]
# Fire Safety Limits
self.Tflashs = [i.Tflash for i in self.Chemicals]
self.Tautoignitions = [i.Tautoignition for i in self.Chemicals]
self.LFLs = [i.LFL for i in self.Chemicals]
self.UFLs = [i.UFL for i in self.Chemicals]
# Chemical Exposure Limits
self.TWAs = [i.TWA for i in self.Chemicals]
self.STELs = [i.STEL for i in self.Chemicals]
self.Ceilings = [i.Ceiling for i in self.Chemicals]
self.Skins = [i.Skin for i in self.Chemicals]
self.Carcinogens = [i.Carcinogen for i in self.Chemicals]
# Misc
self.dipoles = [i.dipole for i in self.Chemicals]
self.molecular_diameters = [i.molecular_diameter for i in self.Chemicals]
self.Stockmayers = [i.Stockmayer for i in self.Chemicals]
# Environmental
self.GWPs = [i.GWP for i in self.Chemicals]
self.ODPs = [i.ODP for i in self.Chemicals]
self.logPs = [i.logP for i in self.Chemicals]
# Analytical
self.RI_Ts = [i.RIT for i in self.Chemicals]
self.RIs = [i.RI for i in self.Chemicals]
self.conductivities = [i.conductivity for i in self.Chemicals]
self.conductivity_Ts = [i.conductivityT for i in self.Chemicals]
# Constant properties obtained from TP
self.Vml_STPs = Vml_STPs = [i.Vml_STP for i in self.Chemicals]
self.rholm_STPs = [i.rhoml_STP for i in self.Chemicals]
self.rhol_STPs = [i.rhol_STP for i in self.Chemicals]
self.Vml_60Fs = [i.Vml_60F for i in self.Chemicals]
self.rhoml_60Fs = [i.rhoml_60F for i in self.Chemicals]
self.rhol_60Fs = [i.rhol_60F for i in self.Chemicals]
self.Vmg_STPs = [i.Vmg_STP for i in self.Chemicals]
self.Vms_Tms = [i.Vms_Tm for i in self.Chemicals]
self.rhoms_Tm = [i.rhoms_Tm for i in self.Chemicals]
self.rhos_Tms = [i.rhos_Tm for i in self.Chemicals]
self.Psat_298s = [i.Psat_298 for i in self.Chemicals]
self.phase_STPs = [i.phase_STP for i in self.Chemicals]
self.Vml_Tbs = [i.Vml_Tb for i in self.Chemicals]
self.Vml_Tms = [i.Vml_Tm for i in self.Chemicals]
self.Hvap_Tbms = [i.Hvap_Tbm for i in self.Chemicals]
self.Hvap_Tbs = [i.Hvap_Tb for i in self.Chemicals]
self.Hvapm_298s = [i.Hvapm_298 for i in self.Chemicals]
self.Hvap_298s = [i.Hvap_298 for i in self.Chemicals]
self.solubility_parameters_STP = [i.solubility_parameter_STP for i in self.Chemicals]
### More stuff here
def set_chemical_TP(self, T=None, P=None):
'''Basic method to change all chemical instances to be at the T and P
specified. If they are not specified, the values of the mixture
will be used. This is not necessary for using the Mixture instance
unless values specified to chemicals are required.
'''
# Temperature and Pressure Dependence
# Get and choose initial methods
if T is None:
T = self.T
if P is None:
P = self.P
[i.calculate(T=T, P=P) for i in self.Chemicals]
def set_constant_sources(self):
# None of this takes much time or is important
# Critical Point, Methods only for Tc, Pc, Vc
self.Tc_methods = []#Tc_mixture(Tcs=self.Tcs, zs=self.zs, CASRNs=self.CASs, get_methods=True)
self.Tc_method = None#self.Tc_methods[0]
self.Pc_methods = []#Pc_mixture(Pcs=self.Pcs, zs=self.zs, CASRNs=self.CASs, get_methods=True)
self.Pc_method = None#self.Pc_methods[0]
self.Vc_methods = []#Vc_mixture(Vcs=self.Vcs, zs=self.zs, CASRNs=self.CASs, get_methods=True)
self.Vc_method = None#self.Vc_methods[0]
self.omega_methods = []#omega_mixture(omegas=self.omegas, zs=self.zs, CASRNs=self.CASs, get_methods=True)
self.omega_method = None#self.omega_methods[0]
# No Flammability limits
# self.LFL_methods = LFL_mixture(ys=self.zs, LFLs=self.LFLs, get_methods=True)
# self.LFL_method = self.LFL_methods[0]
# self.UFL_methods = UFL_mixture(ys=self.zs, UFLs=self.UFLs, get_methods=True)
# self.UFL_method = self.UFL_methods[0]
# No triple point
# Mixed Hf linear
# Exposure limits are minimum of any of them or lower
def set_constants(self):
# None of this takes much time or is important
# Melting point
zs = self.zs
self.Tm = mixing_simple(self.Tms, zs)
# Critical Point
try:
self.Tc = mixing_simple(zs, self.Tcs)
except:
self.Tc = None
try:
self.Pc = mixing_simple(zs, self.Pcs)
except:
self.Pc = None
try:
self.Vc = mixing_simple(zs, self.Vcs)
except:
self.Vc = None
try:
self.omega = mixing_simple(zs, self.omegas)
except:
self.omega = None
self.Zc = Z(self.Tc, self.Pc, self.Vc) if all((self.Tc, self.Pc, self.Vc)) else None
self.rhoc = Vm_to_rho(self.Vc, self.MW) if self.Vc else None
self.rhocm = 1./self.Vc if self.Vc else None
# self.LFL = LFL_mixture(ys=self.zs, LFLs=self.LFLs, method=self.LFL_method)
# self.UFL = UFL_mixture(ys=self.zs, UFLs=self.UFLs, method=self.UFL_method)
def set_eos(self, T, P, eos=PRMIX):
try:
self.eos = eos(T=T, P=P, Tcs=self.Tcs, Pcs=self.Pcs, omegas=self.omegas, zs=self.zs)
except:
# Handle overflow errors and so on
self.eos = IG(T=T, P=P)
@property
def eos(self):
r'''Equation of state object held by the mixture. See
:obj:`thermo.eos_mix` for a full listing.
'''
return self.eos_in_a_box[0]
@eos.setter
def eos(self, eos):
if self.eos_in_a_box:
self.eos_in_a_box.pop()
self.eos_in_a_box.append(eos)
def eos_pures(self, eos=PR, T=None, P=None):
if T is None:
T = self.T
if P is None:
P = self.P
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
eos_list = []
for i in range(len(self.zs)):
try:
e = eos(T=T, P=P, Tc=Tcs[i], Pc=Pcs[i], omega=omegas[i])
except:
e = None
eos_list.append(e)
return eos_list
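# e.g. pure_eoss = m.eos_pures() gives one pure-component PR object (or None
# on failure) per component at the mixture's current T and P (an illustration).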
def set_Chemical_property_objects(self):
self.VolumeSolids = [i.VolumeSolid for i in self.Chemicals]
self.VolumeLiquids = [i.VolumeLiquid for i in self.Chemicals]
self.VolumeGases = [i.VolumeGas for i in self.Chemicals]
self.HeatCapacitySolids = [i.HeatCapacitySolid for i in self.Chemicals]
self.HeatCapacityLiquids = [i.HeatCapacityLiquid for i in self.Chemicals]
self.HeatCapacityGases = [i.HeatCapacityGas for i in self.Chemicals]
self.ViscosityLiquids = [i.ViscosityLiquid for i in self.Chemicals]
self.ViscosityGases = [i.ViscosityGas for i in self.Chemicals]
self.ThermalConductivityLiquids = [i.ThermalConductivityLiquid for i in self.Chemicals]
self.ThermalConductivityGases = [i.ThermalConductivityGas for i in self.Chemicals]
self.SurfaceTensions = [i.SurfaceTension for i in self.Chemicals]
self.Permittivities = [i.Permittivity for i in self.Chemicals]
self.VaporPressures = [i.VaporPressure for i in self.Chemicals]
self.SublimationPressures = [i.SublimationPressure for i in self.Chemicals]
self.EnthalpyVaporizations = [i.EnthalpyVaporization for i in self.Chemicals]
self.EnthalpySublimations = [i.EnthalpySublimation for i in self.Chemicals]
def set_TP_sources(self):
self.VolumeSolidMixture = VolumeSolidMixture(CASs=self.CASs, MWs=self.MWs, VolumeSolids=self.VolumeSolids)
self.VolumeLiquidMixture = VolumeLiquidMixture(MWs=self.MWs, Tcs=self.Tcs, Pcs=self.Pcs, Vcs=self.Vcs, Zcs=self.Zcs, omegas=self.omegas, CASs=self.CASs, VolumeLiquids=self.VolumeLiquids)
self.VolumeGasMixture = VolumeGasMixture(eos=self.eos_in_a_box, MWs=self.MWs, CASs=self.CASs, VolumeGases=self.VolumeGases)
# Temporary
self.VolumeGasMixture.method = LINEAR_MISSING_IDEAL
self.HeatCapacityLiquidMixture = HeatCapacityLiquidMixture(MWs=self.MWs, CASs=self.CASs, HeatCapacityLiquids=self.HeatCapacityLiquids)
self.HeatCapacityGasMixture = HeatCapacityGasMixture(MWs=self.MWs, CASs=self.CASs, HeatCapacityGases=self.HeatCapacityGases)
self.HeatCapacitySolidMixture = HeatCapacitySolidMixture(MWs=self.MWs, CASs=self.CASs, HeatCapacitySolids=self.HeatCapacitySolids)
self.ViscosityLiquidMixture = ViscosityLiquidMixture(MWs=self.MWs, CASs=self.CASs, ViscosityLiquids=self.ViscosityLiquids, correct_pressure_pure=False)
self.ViscosityGasMixture = ViscosityGasMixture(MWs=self.MWs, molecular_diameters=self.molecular_diameters, Stockmayers=self.Stockmayers, CASs=self.CASs, ViscosityGases=self.ViscosityGases, correct_pressure_pure=False)
self.ThermalConductivityLiquidMixture = ThermalConductivityLiquidMixture(CASs=self.CASs, MWs=self.MWs, ThermalConductivityLiquids=self.ThermalConductivityLiquids, correct_pressure_pure=False)
self.ThermalConductivityGasMixture = ThermalConductivityGasMixture(MWs=self.MWs, Tbs=self.Tbs, CASs=self.CASs, ThermalConductivityGases=self.ThermalConductivityGases, ViscosityGases=self.ViscosityGases, correct_pressure_pure=False)
self.SurfaceTensionMixture = SurfaceTensionMixture(MWs=self.MWs, Tbs=self.Tbs, Tcs=self.Tcs, CASs=self.CASs, SurfaceTensions=self.SurfaceTensions, VolumeLiquids=self.VolumeLiquids)
def set_property_package(self, pkg=None):
if pkg is None:
from thermo.property_package import IdealCaloric as pkg
eos_mix = type(self.eos_in_a_box[0]) if self.eos_in_a_box else PRMIX
if type(pkg) == type:
self.property_package = pkg(VaporPressures=self.VaporPressures,
Tms=self.Tms, Tbs=self.Tbs,
Tcs=self.Tcs, Pcs=self.Pcs,
HeatCapacityLiquids=self.HeatCapacityLiquids,
HeatCapacityGases=self.HeatCapacityGases,
EnthalpyVaporizations=self.EnthalpyVaporizations,
UNIFAC_groups=self.UNIFAC_groups, omegas=self.omegas,
Hfs=self.Hfgms, Gfs=self.Gfgms,
VolumeLiquids=self.VolumeLiquids, eos=type(self.Chemicals[0].eos),
eos_mix=eos_mix)
else:
# no need to initialize, already exists
self.property_package = pkg
def flash_caloric(self, T=None, P=None, VF=None, Hm=None, Sm=None,
H=None, S=None):
# TODO check if the input values are the same as the current ones
# The property package works only on a mole-basis, so convert
# H or S if specified to a mole basis
if H is not None:
Hm = property_mass_to_molar(H, self.MW)
if S is not None:
Sm = property_mass_to_molar(S, self.MW)
self.property_package.flash_caloric(zs=self.zs, T=T, P=P, VF=VF, Hm=Hm, Sm=Sm)
self.status = self.property_package.status
if self.status is True:
self.T = self.property_package.T
self.P = self.property_package.P
self.V_over_F = self.VF = self.property_package.V_over_F
self.xs = self.property_package.xs
self.ys = self.property_package.ys
self.phase = self.property_package.phase
self.Hm = self.property_package.Hm
self.Sm = self.property_package.Sm
self.Gm = self.property_package.Gm
try:
self.Hm_reactive = self.property_package.Hm_reactive
self.H_reactive = property_molar_to_mass(self.Hm_reactive, self.MW)
except:
self.Hm_reactive = self.H_reactive = None
try:
self.Sm_reactive = self.property_package.Sm_reactive
self.S_reactive = property_molar_to_mass(self.Sm_reactive, self.MW)
except:
self.Sm_reactive = self.S_reactive = None
try:
self.Gm_reactive = self.property_package.Gm_reactive
self.G_reactive = property_molar_to_mass(self.Gm_reactive, self.MW)
except:
self.Gm_reactive = self.G_reactive = None
self.H = property_molar_to_mass(self.Hm, self.MW)
self.S = property_molar_to_mass(self.Sm, self.MW)
self.G = property_molar_to_mass(self.Gm, self.MW)
# values are None when not in the appropriate phase
self.MWl = mixing_simple(self.xs, self.MWs) if self.xs is not None else None
self.MWg = mixing_simple(self.ys, self.MWs) if self.ys is not None else None
self.wsl = zs_to_ws(self.xs, self.MWs) if self.xs is not None else None
self.wsg = zs_to_ws(self.ys, self.MWs) if self.ys is not None else None
if (self.MWl is not None and self.MWg is not None):
self.quality = self.x = vapor_mass_quality(self.V_over_F, MWl=self.MWl, MWg=self.MWg)
else:
self.quality = self.x = 1 if self.phase == 'g' else 0
if self.xs is None:
self.wsl = zs_to_ws(self.ys, self.MWs)
self.MWl = mixing_simple(self.ys, self.MWs)
if self.ys is None:
self.MWg = mixing_simple(self.xs, self.MWs)
self.wsg = zs_to_ws(self.xs, self.MWs)
# TODO: volume fractions - attempt
# if (self.rhol is not None and self.rhog is not None):
# self.Vfg = vapor_mass_quality(self.quality, MWl=self.Vml, MWg=self.Vmg)
# else:
# self.Vfg = None
else:
# flash failed. still want to set whatever variables can be set though.
for var in ['T', 'P', 'VF', 'Hm', 'Sm', 'H', 'S']:
    value = locals()[var]
    if value is not None:
        setattr(self, var, value)
# Not strictly necessary
[i.calculate(self.T, self.P) for i in self.Chemicals]
# self.set_eos(T=self.T, P=self.P)
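# Illustrative flash specifications (a sketch, assuming the default ideal
# property package; any supported pair of state variables may be given):
# m = Mixture(['water', 'ethanol'], zs=[0.5, 0.5])
# m.flash_caloric(P=101325.0, VF=0.5)          # solve for T at fixed P and VF
# m.flash_caloric(P=101325.0, Hm=m.Hm + 100.0) # re-flash at a higher molar enthalpy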
@property
def Um(self):
r'''Internal energy of the mixture at its current state, in units of
[J/mol].
This property requires that the property package of the mixture
found a solution to the given state variables.
It also depends on the molar volume of the mixture at its current
conditions.
'''
return self.Hm - self.P*self.Vm if (self.Vm and self.Hm is not None) else None
@property
def U(self):
r'''Internal energy of the mixture at its current state,
in units of [J/kg].
This property requires that the property package of the mixture
found a solution to the given state variables.
It also depends on the molar volume of the mixture at its current
conditions.
'''
return property_molar_to_mass(self.Um, self.MW) if (self.Um is not None) else None
@property
def Am(self):
r'''Helmholtz energy of the mixture at its current state,
in units of [J/mol].
This property requires that the property package of the mixture
found a solution to the given state variables.
It also depends on the molar volume of the mixture at its current
conditions.
'''
return self.Um - self.T*self.Sm if (self.Um is not None and self.Sm is not None) else None
@property
def A(self):
r'''Helmholtz energy of the mixture at its current state,
in units of [J/kg].
This property requires that the property package of the mixture
found a solution to the given state variables.
It also depends on the molar volume of the mixture at its current
conditions.
'''
return self.U - self.T*self.S if (self.U is not None and self.S is not None) else None
@property
def Tdew(self):
r'''Dew point temperature of the mixture at its current pressure and
composition, in units of [K].
This property requires that the property package of the mixture
found a solution to the given state variables.
'''
return self.property_package.Tdew(P=self.P, zs=self.zs)
@property
def Pdew(self):
r'''Dew point pressure of the mixture at its current temperature and
composition, in units of [Pa].
This property requires that the property package of the mixture
found a solution to the given state variables.
'''
return self.property_package.Pdew(T=self.T, zs=self.zs)
@property
def Tbubble(self):
r'''Bubble point temperature of the mixture at its current pressure and
composition, in units of [K].
This property requires that the property package of the mixture
found a solution to the given state variables.
'''
return self.property_package.Tbubble(P=self.P, zs=self.zs)
@property
def Pbubble(self):
r'''Bubble point pressure of the mixture at its current temperature and
composition, in units of [Pa].
This property requires that the property package of the mixture
found a solution to the given state variables.
'''
return self.property_package.Pbubble(T=self.T, zs=self.zs)
def Vfls(self, T=None, P=None):
r'''Volume fractions of all species in a hypothetical pure-liquid phase
at the current or specified temperature and pressure. If temperature
or pressure are specified, the non-specified property is assumed to be
that of the mixture. Note this is a method, not a property. Volume
fractions are calculated based on **pure species volumes only**.
Examples
--------
>>> Mixture(['hexane', 'pentane'], zs=[.5, .5], T=315).Vfls()
[0.5299671144566751, 0.47003288554332484]
>>> S = Mixture(['hexane', 'decane'], zs=[0.25, 0.75])
>>> S.Vfls(298.16, 101326)
[0.18301434895886864, 0.8169856510411313]
'''
if (T is None or T == self.T) and (P is None or P == self.P):
Vmls = self.Vmls
else:
if T is None: T = self.T
if P is None: P = self.P
Vmls = [i(T, P) for i in self.VolumeLiquids]
if none_and_length_check([Vmls]):
return zs_to_Vfs(self.zs, Vmls)
return None
def Vfgs(self, T=None, P=None):
r'''Volume fractions of all species in a hypothetical pure-gas phase
at the current or specified temperature and pressure. If temperature
or pressure are specified, the non-specified property is assumed to be
that of the mixture. Note this is a method, not a property. Volume
fractions are calculated based on **pure species volumes only**.
Examples
--------
>>> Mixture(['sulfur hexafluoride', 'methane'], zs=[.2, .9], T=315).Vfgs()
[0.18062059238682632, 0.8193794076131737]
>>> S = Mixture(['sulfur hexafluoride', 'methane'], zs=[.1, .9])
>>> S.Vfgs(P=1E2)
[0.0999987466608421, 0.9000012533391578]
'''
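# Gas volume fractions are taken equal to the mole fractions (exact for an
# ideal gas); the generic pure-component calculation is kept below for
# reference.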
return self.zs
# if (T is None or T == self.T) and (P is None or P == self.P):
# Vmgs = self.Vmgs
# else:
# if T is None: T = self.T
# if P is None: P = self.P
# Vmgs = [i(T, P) for i in self.VolumeGases]
# if none_and_length_check([Vmgs]):
# return zs_to_Vfs(self.zs, Vmgs)
# return None
#
def compound_index(self, CAS):
try:
return self.CASs.index(CAS)
except ValueError:
return self.CASs.index(CAS_from_any(CAS))
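# e.g. m.compound_index('7732-18-5') and m.compound_index('water') resolve to
# the same position; non-CAS identifiers go through CAS_from_any (an
# illustration).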
# Unimportant constants
@property
def PubChems(self):
r'''PubChem Component ID numbers for all chemicals in the mixture.
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5]).PubChems
[241, 1140]
'''
return [i.PubChem for i in self.Chemicals]
@property
def formulas(self):
r'''Chemical formulas for all chemicals in the mixture.
Examples
--------
>>> Mixture(['ethanol', 'trichloroethylene', 'furfuryl alcohol'],
... ws=[0.5, 0.2, 0.3]).formulas
['C2H6O', 'C2HCl3', 'C5H6O2']
'''
return [i.formula for i in self.Chemicals]
@property
def smiless(self):
r'''SMILES strings for all chemicals in the mixture.
Examples
--------
>>> Mixture(['methane', 'ethane', 'propane', 'butane'],
... zs=[0.25, 0.25, 0.25, 0.25]).smiless
['C', 'CC', 'CCC', 'CCCC']
'''
return [i.smiles for i in self.Chemicals]
@property
def InChIs(self):
r'''InChI strings for all chemicals in the mixture.
Examples
--------
>>> Mixture(['methane', 'ethane', 'propane', 'butane'],
... zs=[0.25, 0.25, 0.25, 0.25]).InChIs
['CH4/h1H4', 'C2H6/c1-2/h1-2H3', 'C3H8/c1-3-2/h3H2,1-2H3', 'C4H10/c1-3-4-2/h3-4H2,1-2H3']
'''
return [i.InChI for i in self.Chemicals]
@property
def InChI_Keys(self):
r'''InChI keys for all chemicals in the mixture.
Examples
--------
>>> Mixture(['1-nonene'], zs=[1]).InChI_Keys
['JRZJOMJEPLMPRA-UHFFFAOYSA-N']
'''
return [i.InChI_Key for i in self.Chemicals]
@property
def IUPAC_names(self):
r'''IUPAC names for all chemicals in the mixture.
Examples
--------
>>> Mixture(['1-hexene', '1-nonene'], zs=[.7, .3]).IUPAC_names
['hex-1-ene', 'non-1-ene']
'''
return [i.IUPAC_name for i in self.Chemicals]
@property
def synonymss(self):
r'''Lists of synonyms for all chemicals in the mixture.
Examples
--------
>>> Mixture(['Tetradecene', 'Pentadecene'], zs=[.1, .9]).synonymss
[['tetradec-2-ene', 'tetradecene', '2-tetradecene', 'tetradec-2-ene', '26952-13-6', '35953-53-8', '1652-97-7'], ['pentadec-1-ene', '1-pentadecene', 'pentadecene,1-', 'pentadec-1-ene', '13360-61-7', 'pentadecene']]
'''
return [i.synonyms for i in self.Chemicals]
@property
def charges(self):
r'''Charges for all chemicals in the mixture, [faraday].
Examples
--------
>>> Mixture(['water', 'sodium ion', 'chloride ion'], zs=[.9, .05, .05]).charges
[0, 1, -1]
'''
return [i.charge for i in self.Chemicals]
@property
def similarity_variables(self):
r'''Similarity variables for all chemicals in the mixture, see
:obj:`chemicals.elements.similarity_variable` for the definition, [mol/g]
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5]).similarity_variables
[0.15362587797189262, 0.16279853724428964]
'''
return [i.similarity_variable for i in self.Chemicals]
@property
def atoms(self):
r'''Mole-averaged dictionary of atom counts for all atoms of the
chemicals in the mixture.
Examples
--------
>>> Mixture(['nitrogen', 'oxygen'], zs=[.01, .99]).atoms
{'O': 1.98, 'N': 0.02}
'''
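        # Delegates to mixture_atomic_composition (presumably from
        # chemicals.elements), which weights each pure-component atom count
        # dict by its mole fraction and sums them.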
return mixture_atomic_composition(self.atomss, self.zs)
@property
def atomss(self):
r'''List of dictionaries of atom counts for all chemicals in the mixture.
Examples
--------
>>> Mixture(['nitrogen', 'oxygen'], zs=[.01, .99]).atomss
[{'N': 2}, {'O': 2}]
'''
return [i.atoms for i in self.Chemicals]
@property
def ringss(self):
r'''List of ring counts for all chemicals in the mixture.
Examples
--------
>>> Mixture(['Docetaxel', 'Paclitaxel'], zs=[.5, .5]).ringss
[6, 7]
'''
return [i.rings for i in self.Chemicals]
@property
def atom_fractionss(self):
r'''List of dictionaries of atomic fractions for all chemicals in the
mixture.
Examples
--------
>>> Mixture(['oxygen', 'nitrogen'], zs=[.5, .5]).atom_fractionss
[{'O': 1.0}, {'N': 1.0}]
'''
return [i.atom_fractions for i in self.Chemicals]
@property
def atom_fractions(self):
r'''Dictionary of atomic fractions for each atom in the mixture.
Examples
--------
>>> Mixture(['CO2', 'O2'], zs=[0.5, 0.5]).atom_fractions
{'C': 0.2, 'O': 0.8}
'''
things = dict()
for zi, atoms in zip(self.zs, self.atomss):
for atom, count in atoms.items():
if atom in things:
things[atom] += zi*count
else:
things[atom] = zi*count
tot = sum(things.values())
return {atom : value/tot for atom, value in things.items()}
@property
def mass_fractionss(self):
r'''List of dictionaries of mass fractions for all chemicals in the mixture.
Examples
--------
>>> Mixture(['oxygen', 'nitrogen'], zs=[.5, .5]).mass_fractionss
[{'O': 1.0}, {'N': 1.0}]
'''
return [i.mass_fractions for i in self.Chemicals]
@property
def mass_fractions(self):
r'''Dictionary of mass fractions for each atom in the mixture.
Examples
--------
>>> Mixture(['CO2', 'O2'], zs=[0.5, 0.5]).mass_fractions
{'C': 0.15801826905745822, 'O': 0.8419817309425419}
'''
things = dict()
for zi, atoms in zip(self.zs, self.atomss):
for atom, count in atoms.items():
if atom in things:
things[atom] += zi*count
else:
things[atom] = zi*count
return mass_fractions(things)
@property
def legal_statuses(self):
r'''List of dictionaries of the legal status for all chemicals in the
mixture.
Examples
--------
>>> Mixture(['oxygen', 'nitrogen'], zs=[.5, .5]).legal_statuses
[{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'},
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}]
'''
return [i.legal_status for i in self.Chemicals]
@property
def economic_statuses(self):
r'''List of dictionaries of the economic status for all chemicals in
the mixture.
Examples
--------
>>> Mixture(['o-xylene', 'm-xylene'], zs=[.5, .5]).economic_statuses
[["US public: {'Manufactured': 0.0, 'Imported': 0.0, 'Exported': 0.0}",
u'100,000 - 1,000,000 tonnes per annum',
'OECD HPV Chemicals'],
["US public: {'Manufactured': 39.805, 'Imported': 0.0, 'Exported': 0.0}",
u'100,000 - 1,000,000 tonnes per annum',
'OECD HPV Chemicals']]
'''
return [i.economic_status for i in self.Chemicals]
@property
def UNIFAC_Rs(self):
r'''UNIFAC `R` (normalized Van der Waals volume) values, dimensionless.
Used in the UNIFAC model.
Examples
--------
>>> Mixture(['o-xylene', 'm-xylene'], zs=[.5, .5]).UNIFAC_Rs
[4.6578, 4.6578]
'''
return [i.UNIFAC_R for i in self.Chemicals]
@property
def UNIFAC_Qs(self):
r'''UNIFAC `Q` (normalized Van der Waals area) values, dimensionless.
Used in the UNIFAC model.
Examples
--------
>>> Mixture(['o-xylene', 'decane'], zs=[.5, .5]).UNIFAC_Qs
[3.536, 6.016]
'''
return [i.UNIFAC_Q for i in self.Chemicals]
@property
def UNIFAC_groups(self):
r'''List of dictionaries of UNIFAC subgroup: count groups for each chemical in the mixture. Uses the original
UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).UNIFAC_groups
[{1: 1, 2: 4, 14: 1}, {1: 2, 2: 8}]
'''
return [i.UNIFAC_groups for i in self.Chemicals]
@property
def UNIFAC_Dortmund_groups(self):
r'''List of dictionaries of Dortmund UNIFAC subgroup: count groups for each chemcial in the mixture. Uses the
Dortmund UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).UNIFAC_Dortmund_groups
[{1: 1, 2: 4, 14: 1}, {1: 2, 2: 8}]
'''
return [i.UNIFAC_Dortmund_groups for i in self.Chemicals]
@property
def PSRK_groups(self):
r'''List of dictionaries of PSRK subgroup: count groups for each chemical in the mixture. Uses the PSRK subgroups,
as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).PSRK_groups
[{1: 1, 2: 4, 14: 1}, {1: 2, 2: 8}]
'''
return [i.PSRK_groups for i in self.Chemicals]
@property
def Van_der_Waals_volumes(self):
r'''List of unnormalized Van der Waals volumes of all the chemicals in
the mixture, in units of [m^3/mol].
Examples
--------
>>> Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).Van_der_Waals_volumes
[6.9762279e-05, 0.00010918455800000001]
'''
return [i.Van_der_Waals_volume for i in self.Chemicals]
@property
def Van_der_Waals_areas(self):
r'''List of unnormalized Van der Waals areas of all the chemicals
in the mixture, in units of [m^2/mol].
Examples
--------
>>> Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).Van_der_Waals_areas
[1052000.0, 1504000.0]
'''
return [i.Van_der_Waals_area for i in self.Chemicals]
@property
def R_specific(self):
r'''Specific gas constant of the mixture, in units of [J/kg/K].
Examples
--------
>>> Mixture(['N2', 'O2'], zs=[0.79, .21]).R_specific
288.1928437986195
'''
return property_molar_to_mass(R, self.MW)
@property
def Hc(self):
r'''Standard higher heat of combustion of the mixture,
in units of [J/kg].
This property depends on the bulk composition only.
'''
return mixing_simple(self.Hcs, self.ws)
@property
def Hcm(self):
r'''Standard higher molar heat of combustion of the mixture,
in units of [J/mol].
This property depends on the bulk composition only.
'''
return mixing_simple(self.Hcms, self.zs)
@property
def Hcm_lower(self):
r'''Standard lower molar heat of combustion of the mixture,
in units of [J/mol].
This property depends on the bulk composition only.
'''
return mixing_simple(self.Hcms_lower, self.zs)
@property
def Hc_lower(self):
r'''Standard lower heat of combustion of the mixture,
in units of [J/kg].
This property depends on the bulk composition only.
'''
return mixing_simple(self.Hcs_lower, self.ws)
def Hc_volumetric_g(self, T=288.7055555555555, P=101325.0):
        r'''Standard higher heat of combustion of the mixture on a volumetric
        basis, in units of [J/m^3], at the specified `T` and `P` in the gas phase.
This property depends on the bulk composition only.
Parameters
----------
T : float, optional
Reference temperature, [K]
P : float, optional
Reference pressure, [Pa]
Returns
-------
        Hc_volumetric_g : float
Higher heat of combustion on a volumetric basis, [J/m^3]
'''
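        # Note: the default T of 288.7055... K corresponds to 60 °F, a common
        # reference condition for volumetric gas heating values.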
Vm = self.VolumeGasMixture(T=T, P=P, zs=self.zs, ws=self.ws)
Hcm = self.Hcm
return Hcm/Vm
def Hc_volumetric_g_lower(self, T=288.7055555555555, P=101325.0):
        r'''Standard lower heat of combustion of the mixture on a volumetric
        basis, in units of [J/m^3], at the specified `T` and `P` in the gas phase.
This property depends on the bulk composition only.
Parameters
----------
T : float, optional
Reference temperature, [K]
P : float, optional
Reference pressure, [Pa]
Returns
-------
        Hc_volumetric_g_lower : float
Lower heat of combustion on a volumetric basis, [J/m^3]
'''
Vm = self.VolumeGasMixture(T=T, P=P, zs=self.zs, ws=self.ws)
Hcm_lower = self.Hcm_lower
return Hcm_lower/Vm
@property
def charge_balance(self):
r'''Charge imbalance of the mixture, in units of [faraday].
Mixtures meeting the electroneutrality condition will have an imbalance
of 0.
Examples
--------
>>> Mixture(['Na+', 'Cl-', 'water'], zs=[.01, .01, .98]).charge_balance
0.0
'''
return sum([zi*ci for zi, ci in zip(self.zs, self.charges)])
### One phase properties - calculate lazily
@property
def Psats(self):
r'''Pure component vapor pressures of the chemicals in the mixture at
its current temperature, in units of [Pa].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Psats
[32029.25774454549, 10724.419010511821]
'''
return [i.Psat for i in self.Chemicals]
@property
def Hvapms(self):
r'''Pure component enthalpies of vaporization of the chemicals in the
mixture at its current temperature, in units of [J/mol].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Hvapms
[32639.806783391632, 36851.7902195611]
'''
return [i.Hvapm for i in self.Chemicals]
@property
def Hvaps(self):
r'''Enthalpy of vaporization of the chemicals in the mixture at its
current temperature, in units of [J/kg].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Hvaps
[417859.9144942896, 399961.16950519773]
'''
return [i.Hvap for i in self.Chemicals]
@property
def Cpsms(self):
r'''Solid-phase pure component heat capacity of the chemicals in the
mixture at its current temperature, in units of [J/mol/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpsms
[109.77384365511931, 135.22614707678474]
'''
return [i.Cpsm for i in self.Chemicals]
@property
def Cplms(self):
r'''Liquid-phase pure component heat capacity of the chemicals in the
mixture at its current temperature, in units of [J/mol/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cplms
[140.9113971170526, 163.62584810669068]
'''
return [i.Cplm for i in self.Chemicals]
@property
def Cpgms(self):
        r'''Gas-phase ideal-gas heat capacity of the chemicals in the mixture
        at its current temperature, in units of [J/mol/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpgms
[89.55804092586159, 111.70390334788907]
'''
return [i.Cpgm for i in self.Chemicals]
@property
def Cpss(self):
r'''Solid-phase pure component heat capacity of the chemicals in the
mixture at its current temperature, in units of [J/kg/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpss
[1405.341925822248, 1467.6412627521154]
'''
return [i.Cps for i in self.Chemicals]
@property
def Cpls(self):
r'''Liquid-phase pure component heat capacity of the chemicals in the
mixture at its current temperature, in units of [J/kg/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpls
[1803.9697581961016, 1775.869915141704]
'''
return [i.Cpl for i in self.Chemicals]
@property
def Cpgs(self):
r'''Gas-phase pure component heat capacity of the chemicals in the
mixture at its current temperature, in units of [J/kg/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpgs
[1146.5360555565146, 1212.3488046342566]
'''
return [i.Cpg for i in self.Chemicals]
@property
def Cvgms(self):
        r'''Gas-phase pure component ideal-gas constant-volume heat capacities
of the chemicals in the mixture at its current temperature, in units
of [J/mol/K]. Subtracts R from the ideal-gas heat capacities; does not
include pressure-compensation from an equation of state.
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cvgms
[81.2435811258616, 103.38944354788907]
'''
return [i.Cvgm for i in self.Chemicals]
@property
def Cvgs(self):
        r'''Gas-phase pure component ideal-gas constant-volume heat capacities
of the chemicals in the mixture at its current temperature, in units of
[J/kg/K]. Subtracts R from the ideal-gas heat capacity; does not include
pressure-compensation from an equation of state.
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cvgs
[1040.093040003431, 1122.1100117398266]
'''
return [i.Cvg for i in self.Chemicals]
@property
def isentropic_exponents(self):
r'''Gas-phase pure component ideal-gas isentropic exponent of the
chemicals in the mixture at its current temperature, [dimensionless].
Does not include pressure-compensation from an equation of state.
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).isentropic_exponents
[1.1023398979313739, 1.080418846592871]
'''
return [i.isentropic_exponent for i in self.Chemicals]
@property
def Vmss(self):
r'''Pure component solid-phase molar volumes of the chemicals in the
mixture at its current temperature, in units of [m^3/mol].
Examples
--------
>>> Mixture(['iron'], ws=[1], T=320).Vmss
[7.09593392630242e-06]
'''
return [i.Vms for i in self.Chemicals]
@property
def Vmls(self):
r'''Pure component liquid-phase molar volumes of the chemicals in the
mixture at its current temperature and pressure, in units of [m^3/mol].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Vmls
[9.188896727673715e-05, 0.00010946199496993461]
'''
return [i.Vml for i in self.Chemicals]
@property
def Vmgs(self):
r'''Pure component gas-phase molar volumes of the chemicals in the
mixture at its current temperature and pressure, in units of [m^3/mol].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Vmgs
[0.024929001982294974, 0.024150186467130488]
'''
return [i.Vmg for i in self.Chemicals]
@property
def rhoss(self):
r'''Pure component solid-phase mass density of the chemicals in the
mixture at its current temperature, in units of [kg/m^3].
Examples
--------
>>> Mixture(['iron'], ws=[1], T=320).rhoss
[7869.999999999994]
'''
return [i.rhos for i in self.Chemicals]
@property
def rhols(self):
r'''Pure-component liquid-phase mass density of the chemicals in the
mixture at its current temperature and pressure, in units of [kg/m^3].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rhols
[850.0676666084917, 841.7389069631628]
'''
return [i.rhol for i in self.Chemicals]
@property
def rhogs(self):
r'''Pure-component gas-phase mass densities of the chemicals in the
mixture at its current temperature and pressure, in units of [kg/m^3].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rhogs
[3.1333721283939258, 3.8152260283954584]
'''
return [i.rhog for i in self.Chemicals]
@property
def rhosms(self):
r'''Pure component molar densities of the chemicals in the solid phase
at the current temperature and pressure, in units of [mol/m^3].
Examples
--------
>>> Mixture(['iron'], ws=[1], T=320).rhosms
[140925.7767033753]
'''
return [i.rhosm for i in self.Chemicals]
@property
def rholms(self):
r'''Pure component molar densities of the chemicals in the mixture in
the liquid phase at the current temperature and pressure, in units of
[mol/m^3].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rholms
[10882.699301520635, 9135.590853014008]
'''
return [i.rholm for i in self.Chemicals]
@property
def rhogms(self):
r'''Pure component molar densities of the chemicals in the gas phase at
the current temperature and pressure, in units of [mol/m^3].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rhogms
[40.11392035309789, 41.407547778608084]
'''
return [i.rhogm for i in self.Chemicals]
@property
def Zss(self):
r'''Pure component compressibility factors of the chemicals in the
mixture in the solid phase at the current temperature and pressure,
[dimensionless].
Examples
--------
>>> Mixture(['palladium'], ws=[1]).Zss
[0.00036248477437931853]
'''
return [i.Zs for i in self.Chemicals]
@property
def Zls(self):
r'''Pure component compressibility factors of the chemicals in the
liquid phase at the current temperature and pressure, [dimensionless].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Zls
[0.0034994191720201235, 0.004168655010037687]
'''
return [i.Zl for i in self.Chemicals]
@property
def Zgs(self):
r'''Pure component compressibility factors of the chemicals in the
mixture in the gas phase at the current temperature and pressure,
[dimensionless].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Zgs
[0.9493743379816593, 0.9197146081359057]
'''
return [i.Zg for i in self.Chemicals]
@property
def SGs(self):
r'''Specific gravity of a hypothetical solid phase of the mixture at the
specified temperature and pressure, [dimensionless].
The reference condition is water at 4 °C and 1 atm
        (rho=999.017 kg/m^3). The SG varies with temperature and pressure
but only very slightly.
'''
rhos = self.rhos
if rhos is not None:
return SG(rhos)
return None
@property
def SGl(self):
r'''Specific gravity of a hypothetical liquid phase of the mixture at
the specified temperature and pressure, [dimensionless].
The reference condition is water at 4 °C and 1 atm
        (rho=999.017 kg/m^3). For liquids, SG is defined such that the
        reference chemical's T and P are fixed, while the mixture's density
        is evaluated at the specified T and P.
Examples
--------
>>> Mixture('water', ws=[1], T=365).SGl
0.9650065522428539
'''
rhol = self.rhol
if rhol is not None:
return SG(rhol)
return None
@property
def isobaric_expansion_ls(self):
r'''Pure component isobaric (constant-pressure) expansions of the
chemicals in the mixture in the liquid phase at its current temperature
and pressure, in units of [1/K].
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).isobaric_expansion_ls
[0.0012736035771253886, 0.0011234157437069571]
'''
return [i.isobaric_expansion_l for i in self.Chemicals]
@property
def isobaric_expansion_gs(self):
r'''Pure component isobaric (constant-pressure) expansions of the
chemicals in the mixture in the gas phase at its current temperature
and pressure, in units of [1/K].
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).isobaric_expansion_gs
[0.0038091518363900499, 0.0043556759306508453]
'''
return [i.isobaric_expansion_g for i in self.Chemicals]
@property
def muls(self):
r'''Pure component viscosities of the chemicals in the mixture in the
liquid phase at its current temperature and pressure, in units of
[Pa*s].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).muls
[0.00045545522798131764, 0.00043274394349114754]
'''
return [i.mul for i in self.Chemicals]
@property
def mugs(self):
r'''Pure component viscosities of the chemicals in the mixture in the
gas phase at its current temperature and pressure, in units of [Pa*s].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).mugs
[8.082880451060605e-06, 7.442602145854158e-06]
'''
return [i.mug for i in self.Chemicals]
@property
def kls(self):
r'''Pure component thermal conductivities of the chemicals in the
mixture in the liquid phase at its current temperature and pressure, in
units of [W/m/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).kls
[0.13391538485205587, 0.12429339088930591]
'''
return [i.kl for i in self.Chemicals]
@property
def kgs(self):
        r'''Pure component thermal conductivities of the chemicals in the mixture
in the gas phase at its current temperature and pressure, in units of
[W/m/K].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).kgs
[0.011865404482987936, 0.010981336502491088]
'''
return [i.kg for i in self.Chemicals]
@property
def sigmas(self):
r'''Pure component surface tensions of the chemicals in the mixture at
its current temperature, in units of [N/m].
Examples
--------
>>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).sigmas
[0.02533469712937521, 0.025254723406585546]
'''
return [i.sigma for i in self.Chemicals]
@property
def permittivites(self):
r'''Pure component relative permittivities of the chemicals in the
mixture at its current temperature, [dimensionless].
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).permittivites
[2.23133472, 1.8508128]
'''
return [i.permittivity for i in self.Chemicals]
@property
def JTls(self):
r'''Pure component Joule Thomson coefficients of the chemicals in the
mixture in the liquid phase at its current temperature and pressure, in
units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTls
[-3.8633730709853161e-07, -3.464395792560331e-07]
'''
return [i.JTl for i in self.Chemicals]
@property
def JTgs(self):
r'''Pure component Joule Thomson coefficients of the chemicals in the
mixture in the gas phase at its current temperature and pressure, in
units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTgs
[6.0940046688790938e-05, 4.1290005523287549e-05]
'''
return [i.JTg for i in self.Chemicals]
@property
def nuls(self):
r'''Pure component kinematic viscosities of the liquid phase of the
chemicals in the mixture at its current temperature and pressure, in
units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).nuls
[5.357870271650772e-07, 3.8127962283230277e-07]
'''
return [i.nul for i in self.Chemicals]
@property
def nugs(self):
r'''Pure component kinematic viscosities of the gas phase of the
chemicals in the mixture at its current temperature and pressure, in
units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).nugs
[5.357870271650772e-07, 3.8127962283230277e-07]
'''
        return [i.nug for i in self.Chemicals]
@property
def alphals(self):
r'''Pure component thermal diffusivities of the chemicals in the
mixture in the liquid phase at the current temperature and pressure, in
units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).alphals
[8.732683564481583e-08, 7.57355434073289e-08]
'''
return [i.alphal for i in self.Chemicals]
@property
def alphags(self):
r'''Pure component thermal diffusivities of the chemicals in the
mixture in the gas phase at the current temperature and pressure, in
units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).alphags
[3.3028044028118324e-06, 2.4412958544059014e-06]
'''
return [i.alphag for i in self.Chemicals]
@property
def Prls(self):
r'''Pure component Prandtl numbers of the liquid phase of the chemicals
in the mixture at its current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).Prls
[6.13542244155373, 5.034355147908088]
'''
return [i.Prl for i in self.Chemicals]
@property
def Prgs(self):
r'''Pure component Prandtl numbers of the gas phase of the chemicals
in the mixture at its current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).Prgs
[0.7810364900059606, 0.784358381123896]
'''
return [i.Prg for i in self.Chemicals]
@property
def solubility_parameters(self):
r'''Pure component solubility parameters of the chemicals in the
mixture at its current temperature and pressure, in units of [Pa^0.5].
.. math::
\delta = \sqrt{\frac{\Delta H_{vap} - RT}{V_m}}
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).solubility_parameters
[18062.51359608708, 14244.12852702228]
'''
return [i.solubility_parameter for i in self.Chemicals]
@property
def Parachors(self):
r'''Pure component Parachor parameters of the chemicals in the
mixture at its current temperature and pressure, in units
of [N^0.25*m^2.75/mol].
.. math::
P = \frac{\sigma^{0.25} MW}{\rho_L - \rho_V}
Calculated based on surface tension, density of the liquid and gas
phase, and molecular weight. For uses of this property, see
:obj:`thermo.utils.Parachor`.
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).Parachors
[3.6795616000855504e-05, 4.82947303150274e-05]
'''
return [i.Parachor for i in self.Chemicals]
### Overall mixture properties
@property
def rhol(self):
r'''Liquid-phase mass density of the mixture at its current
temperature, pressure, and composition in units of [kg/m^3]. For
calculation of this property at other temperatures, pressures,
compositions or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.volume.VolumeLiquidMixture`; each Mixture instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Mixture(['o-xylene'], ws=[1], T=297).rhol
876.9946785618097
'''
Vml = self.Vml
if Vml:
return Vm_to_rho(Vml, self.MWl)
return None
@property
def rhog(self):
r'''Gas-phase mass density of the mixture at its current temperature,
pressure, and composition in units of [kg/m^3]. For calculation of this
property at other temperatures, pressures, or compositions or
specifying manually the method used to calculate it, and more - see the
object oriented interface :obj:`thermo.volume.VolumeGasMixture`; each
Mixture instance creates one to actually perform the calculations. Note
that that interface provides output in molar units.
Examples
--------
>>> Mixture(['hexane'], ws=[1], T=300, P=2E5).rhog
7.914447603999089
'''
Vmg = self.Vmg
if Vmg:
return Vm_to_rho(Vmg, self.MWg)
return None
@property
def rholm(self):
r'''Molar density of the mixture in the liquid phase at the
current temperature, pressure, and composition in units of [mol/m^3].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeLiquidMixture` to perform the actual
calculation of molar volume.
Examples
--------
>>> Mixture(['water'], ws=[1], T=300).rholm
55317.352773503124
'''
Vml = self.Vml
if Vml:
return 1./Vml
return None
@property
def rhogm(self):
r'''Molar density of the mixture in the gas phase at the
current temperature, pressure, and composition in units of [mol/m^3].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeGasMixture` to perform the actual
calculation of molar volume.
Examples
--------
>>> Mixture(['water'], ws=[1], T=500).rhogm
24.467426039789093
'''
Vmg = self.Vmg
if Vmg:
return 1./Vmg
return None
@property
def Zl(self):
r'''Compressibility factor of the mixture in the liquid phase at the
current temperature, pressure, and composition, [dimensionless].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeLiquidMixture` to perform the actual
calculation of molar volume.
Examples
--------
>>> Mixture(['water'], ws=[1]).Zl
0.0007385375470263454
'''
Vml = self.Vml
if Vml:
return Z(self.T, self.P, Vml)
return None
@property
def Zg(self):
r'''Compressibility factor of the mixture in the gas phase at the
current temperature, pressure, and composition, [dimensionless].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeGasMixture` to perform the actual calculation
of molar volume.
Examples
--------
>>> Mixture(['hexane'], ws=[1], T=300, P=1E5).Zg
0.9403859376888885
'''
Vmg = self.Vmg
if Vmg:
return Z(self.T, self.P, Vmg)
return None
@property
def Cpsm(self):
r'''Solid-phase heat capacity of the mixture at its current temperature
and composition, in units of [J/mol/K]. For calculation of this property
at other temperatures or compositions, or specifying manually the
method used to calculate it, and more - see the object oriented
interface :obj:`thermo.heat_capacity.HeatCapacitySolidMixture`; each
Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['silver', 'platinum'], ws=[0.95, 0.05]).Cpsm
25.32745796347474
'''
return self.HeatCapacitySolidMixture(self.T, self.P, self.zs, self.ws)
@property
def Cplm(self):
r'''Liquid-phase heat capacity of the mixture at its current
temperature and composition, in units of [J/mol/K]. For calculation of
this property at other temperatures or compositions, or specifying
manually the method used to calculate it, and more - see the object
oriented interface :obj:`thermo.heat_capacity.HeatCapacityLiquidMixture`;
each Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['toluene', 'decane'], ws=[.9, .1], T=300).Cplm
168.29127923518843
'''
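        # Note the liquid-phase composition (xs, wsl) is used here, not the
        # bulk zs/ws as in Cpsm.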
return self.HeatCapacityLiquidMixture(self.T, self.P, self.xs, self.wsl)
@property
def Cpgm(self):
r'''Gas-phase heat capacity of the mixture at its current temperature
and composition, in units of [J/mol/K]. For calculation of this property
at other temperatures or compositions, or specifying manually the
method used to calculate it, and more - see the object oriented
interface :obj:`thermo.heat_capacity.HeatCapacityGasMixture`; each
Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['oxygen', 'nitrogen'], ws=[.4, .6], T=350, P=1E6).Cpgm
29.361044582498046
'''
return self.HeatCapacityGasMixture(self.T, self.P, self.ys, self.wsg)
@property
def Cps(self):
r'''Solid-phase heat capacity of the mixture at its current temperature
and composition, in units of [J/kg/K]. For calculation of this property
at other temperatures or compositions, or specifying manually the
method used to calculate it, and more - see the object oriented
interface :obj:`thermo.heat_capacity.HeatCapacitySolidMixture`; each
Mixture instance creates one to actually perform the calculations. Note
that that interface provides output in molar units.
Examples
--------
>>> Mixture(['silver', 'platinum'], ws=[0.95, 0.05]).Cps
229.55166388430328
'''
Cpsm = self.HeatCapacitySolidMixture(self.T, self.P, self.zs, self.ws)
if Cpsm:
return property_molar_to_mass(Cpsm, self.MW)
return None
@property
def Cpl(self):
r'''Liquid-phase heat capacity of the mixture at its current
temperature and composition, in units of [J/kg/K]. For calculation of
this property at other temperatures or compositions, or specifying
manually the method used to calculate it, and more - see the object
oriented interface :obj:`thermo.heat_capacity.HeatCapacityLiquidMixture`;
each Mixture instance creates one to actually perform the calculations.
Note that that interface provides output in molar units.
Examples
--------
>>> Mixture(['water', 'sodium chloride'], ws=[.9, .1], T=301.5).Cpl
3735.4604049449786
'''
Cplm = self.HeatCapacityLiquidMixture(self.T, self.P, self.xs, self.wsl)
if Cplm:
return property_molar_to_mass(Cplm, self.MWl)
return None
@property
def Cpg(self):
        r'''Gas-phase heat capacity of the mixture at its current temperature
        and composition, in units of [J/kg/K]. For calculation of this property at
other temperatures or compositions, or specifying manually the method
used to calculate it, and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacityGasMixture`; each Mixture
instance creates one to actually perform the calculations. Note that
that interface provides output in molar units.
Examples
--------
>>> Mixture(['oxygen', 'nitrogen'], ws=[.4, .6], T=350, P=1E6).Cpg
995.8911053614883
'''
Cpgm = self.HeatCapacityGasMixture(self.T, self.P, self.ys, self.wsg)
if Cpgm:
return property_molar_to_mass(Cpgm, self.MWg)
return None
@property
def Cvgm(self):
        r'''Gas-phase ideal-gas constant-volume heat capacity of the mixture at
its current temperature and composition, in units of [J/mol/K]. Subtracts R from
the ideal-gas heat capacity; does not include pressure-compensation
from an equation of state.
Examples
--------
>>> Mixture(['water'], ws=[1], T=520).Cvgm
27.13366316134193
'''
Cpgm = self.HeatCapacityGasMixture(self.T, self.P, self.ys, self.wsg)
if Cpgm:
return Cpgm - R
return None
@property
def Cvg(self):
        r'''Gas-phase ideal-gas constant-volume heat capacity of the mixture at
its current temperature, in units of [J/kg/K]. Subtracts R from
the ideal-gas heat capacity; does not include pressure-compensation
from an equation of state.
Examples
--------
>>> Mixture(['water'], ws=[1], T=520).Cvg
1506.1471795798861
'''
Cvgm = self.Cvgm
if Cvgm:
return property_molar_to_mass(Cvgm, self.MWg)
return None
@property
def speed_of_sound_g(self):
r'''Gas-phase speed of sound of the mixture at its
current temperature, [m/s].
Examples
--------
>>> Mixture(['nitrogen'], ws=[1]).speed_of_sound_g
351.77445481641661
'''
dP_dV = 1.0/self.VolumeGasMixture.property_derivative_P(T=self.T, P=self.P,
zs=self.ys, ws=self.wsg, order=1)
return speed_of_sound(V=self.Vmg, dP_dV=dP_dV, Cp=self.property_package.Cpgm,
Cv=self.property_package.Cvgm, MW=self.MWg)
@property
def speed_of_sound_l(self):
r'''Liquid-phase speed of sound of the mixture at its
current temperature, [m/s].
Examples
--------
>>> Mixture(['toluene'], P=1E5, T=300, ws=[1]).speed_of_sound_l
1116.0852487852942
'''
dP_dV = 1.0/self.VolumeLiquidMixture.property_derivative_P(T=self.T, P=self.P,
zs=self.xs, ws=self.wsl, order=1)
return speed_of_sound(V=self.Vml, dP_dV=dP_dV, Cp=self.property_package.Cplm,
Cv=self.property_package.Cvlm, MW=self.MWl)
@property
def speed_of_sound(self):
r'''Bulk speed of sound of the mixture at its
current temperature, [m/s].
Examples
--------
>>> Mixture(['toluene'], P=1E5, VF=0.5, ws=[1]).speed_of_sound
478.99527258140211
'''
if self.phase == 'l':
return self.speed_of_sound_l
elif self.phase == 'g':
return self.speed_of_sound_g
elif self.phase == 'l/g':
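            # Crude quality-weighted average of the two phase speeds; a
            # rigorous two-phase treatment (e.g. Wood's equation) would give
            # a different, typically much lower, value.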
return self.speed_of_sound_g*self.x + (1.0 - self.x)*self.speed_of_sound_l
@property
def isentropic_exponent(self):
r'''Gas-phase ideal-gas isentropic exponent of the mixture at its
current temperature, [dimensionless]. Does not include
pressure-compensation from an equation of state.
Examples
--------
>>> Mixture(['hydrogen'], ws=[1]).isentropic_exponent
1.405237786321222
'''
Cp, Cv = self.Cpg, self.Cvg
if Cp and Cv:
return isentropic_exponent(Cp, Cv)
return None
@property
def Bvirial(self):
r'''Second virial coefficient of the gas phase of the mixture at its
current temperature, pressure, and composition in units of [mol/m^3].
This property uses the object-oriented interface
:obj:`thermo.volume.VolumeGasMixture`, converting its result with
:obj:`thermo.utils.B_from_Z`.
Examples
--------
>>> Mixture(['hexane'], ws=[1], T=300, P=1E5).Bvirial
-0.001486976173801296
'''
if self.Vmg:
return B_from_Z(self.Zg, self.T, self.P)
return None
@property
def JTl(self):
r'''Joule Thomson coefficient of the liquid phase of the mixture if one
exists at its current temperature and pressure, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Mixture(['dodecane'], ws=[1], T=400).JTl
-3.193910574559279e-07
'''
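        # Note: the all() guard treats a legitimate value of 0.0 the same as
        # a missing (None) property; it is a crude availability check.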
Vml, Cplm, isobaric_expansion_l = self.Vml, self.Cplm, self.isobaric_expansion_l
if all((Vml, Cplm, isobaric_expansion_l)):
return Joule_Thomson(T=self.T, V=Vml, Cp=Cplm, beta=isobaric_expansion_l)
return None
@property
def JTg(self):
r'''Joule Thomson coefficient of the gas phase of the mixture if one
exists at its current temperature and pressure, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Mixture(['dodecane'], ws=[1], T=400, P=1000).JTg
5.4089897835384913e-05
'''
Vmg, Cpgm, isobaric_expansion_g = self.Vmg, self.Cpgm, self.isobaric_expansion_g
if all((Vmg, Cpgm, isobaric_expansion_g)):
return Joule_Thomson(T=self.T, V=Vmg, Cp=Cpgm, beta=isobaric_expansion_g)
return None
@property
def nul(self):
r'''Kinematic viscosity of the liquid phase of the mixture if one
exists at its current temperature and pressure, in units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Mixture(['methane'], ws=[1], T=110).nul
2.858088468937333e-07
'''
mul, rhol = self.mul, self.rhol
if all([mul, rhol]):
return nu_mu_converter(mu=mul, rho=rhol)
return None
@property
def nug(self):
r'''Kinematic viscosity of the gas phase of the mixture if one exists
at its current temperature and pressure, in units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Mixture(['methane'], ws=[1], T=115).nug
2.5118460023343146e-06
'''
mug, rhog = self.mug, self.rhog
if all([mug, rhog]):
return nu_mu_converter(mu=mug, rho=rhog)
return None
@property
def alphal(self):
r'''Thermal diffusivity of the liquid phase of the mixture if one
exists at its current temperature and pressure, in units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Mixture(['nitrogen'], ws=[1], T=70).alphal
9.444949636299626e-08
'''
kl, rhol, Cpl = self.kl, self.rhol, self.Cpl
if all([kl, rhol, Cpl]):
return thermal_diffusivity(k=kl, rho=rhol, Cp=Cpl)
return None
@property
def alphag(self):
r'''Thermal diffusivity of the gas phase of the mixture if one exists
at its current temperature and pressure, in units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Mixture(['ammonia'], ws=[1]).alphag
1.6968517002221566e-05
'''
kg, rhog, Cpg = self.kg, self.rhog, self.Cpg
if all([kg, rhog, Cpg]):
return thermal_diffusivity(k=kg, rho=rhog, Cp=Cpg)
return None
@property
def Prl(self):
r'''Prandtl number of the liquid phase of the mixture if one exists at
its current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Mixture(['nitrogen'], ws=[1], T=70).Prl
2.782821450148889
'''
Cpl, mul, kl = self.Cpl, self.mul, self.kl
if all([Cpl, mul, kl]):
return Prandtl(Cp=Cpl, mu=mul, k=kl)
return None
@property
def Prg(self):
r'''Prandtl number of the gas phase of the mixture if one exists at its
current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Mixture(['NH3'], ws=[1]).Prg
0.8472637319330079
'''
Cpg, mug, kg = self.Cpg, self.mug, self.kg
if all([Cpg, mug, kg]):
return Prandtl(Cp=Cpg, mu=mug, k=kg)
return None
@property
def Parachor(self):
r'''Parachor of the mixture at its
current temperature and pressure, in units of [N^0.25*m^2.75/mol].
.. math::
P = \frac{\sigma^{0.25} MW}{\rho_L - \rho_V}
Calculated based on surface tension, density of the liquid and gas
phase, and molecular weight. For uses of this property, see
:obj:`thermo.utils.Parachor`.
Examples
--------
>>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).Parachor
4.233407085050756e-05
'''
sigma, rhol, rhog = self.sigma, self.rhol, self.rhog
if all((sigma, rhol, rhog, self.MW)):
return Parachor(sigma=sigma, MW=self.MW, rhol=rhol, rhog=rhog)
return None
### Properties from Mixture objects
@property
def Vml(self):
r'''Liquid-phase molar volume of the mixture at its current
temperature, pressure, and composition in units of [m^3/mol]. For
calculation of this property at other temperatures or pressures or
compositions, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.volume.VolumeLiquidMixture`; each Mixture instance
creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['cyclobutane'], ws=[1], T=225).Vml
7.42395423425395e-05
'''
return self.VolumeLiquidMixture(T=self.T, P=self.P, zs=self.xs, ws=self.wsl)
@property
def Vmg(self):
r'''Gas-phase molar volume of the mixture at its current
temperature, pressure, and composition in units of [m^3/mol]. For
calculation of this property at other temperatures or pressures or
compositions, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.volume.VolumeGasMixture`; each Mixture instance
creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['hexane'], ws=[1], T=300, P=2E5).Vmg
0.010888694235142216
'''
return self.VolumeGasMixture(T=self.T, P=self.P, zs=self.ys, ws=self.wsg)
@property
def SGg(self):
        r'''Specific gravity of a hypothetical gas phase of the mixture,
[dimensionless]. The reference condition is air at 15.6 °C (60 °F) and
1 atm (rho=1.223 kg/m^3). The definition for gases uses the
compressibility factor of the reference gas and the mixture both at the
reference conditions, not the conditions of the mixture.
Examples
--------
>>> Mixture('argon').SGg
1.3800407778218216
'''
Vmg = self.VolumeGasMixture(T=288.70555555555552, P=101325, zs=self.ys, ws=self.wsg)
if Vmg:
rho = Vm_to_rho(Vmg, self.MW)
return SG(rho, rho_ref=1.2231876628642968) # calculated with Mixture
return None
@property
def mul(self):
r'''Viscosity of the mixture in the liquid phase at its current
temperature, pressure, and composition in units of [Pa*s].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.viscosity.ViscosityLiquidMixture`; each Mixture instance
creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['water'], ws=[1], T=320).mul
0.0005767262693751547
'''
return self.ViscosityLiquidMixture(self.T, self.P, self.xs, self.wsl)
@property
def mug(self):
r'''Viscosity of the mixture in the gas phase at its current
temperature, pressure, and composition in units of [Pa*s].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.viscosity.ViscosityGasMixture`; each Mixture instance
creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['water'], ws=[1], T=500).mug
1.7298722343367148e-05
'''
return self.ViscosityGasMixture(self.T, self.P, self.ys, self.wsg)
@property
def sigma(self):
r'''Surface tension of the mixture at its current temperature and
composition, in units of [N/m].
For calculation of this property at other temperatures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface :obj:`thermo.interface.SurfaceTensionMixture`;
each Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['water'], ws=[1], T=300, P=1E5).sigma
0.07176932405246211
'''
return self.SurfaceTensionMixture(self.T, self.P, self.xs, self.wsl)
@property
def kl(self):
r'''Thermal conductivity of the mixture in the liquid phase at its current
        temperature, pressure, and composition in units of [W/m/K].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.thermal_conductivity.ThermalConductivityLiquidMixture`;
each Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['water'], ws=[1], T=320).kl
0.6369957248212118
'''
return self.ThermalConductivityLiquidMixture(self.T, self.P, self.xs, self.wsl)
@property
def kg(self):
r'''Thermal conductivity of the mixture in the gas phase at its current
        temperature, pressure, and composition in units of [W/m/K].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.thermal_conductivity.ThermalConductivityGasMixture`;
each Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['water'], ws=[1], T=500).kg
0.036035173297862676
'''
return self.ThermalConductivityGasMixture(self.T, self.P, self.ys, self.wsg)
### Single-phase properties
@property
def Cp(self):
r'''Mass heat capacity of the mixture at its current phase and
temperature, in units of [J/kg/K].
Examples
--------
>>> w = Mixture(['water'], ws=[1])
>>> w.Cp, w.phase
(4180.597021827336, 'l')
>>> Pd = Mixture(['palladium'], ws=[1])
>>> Pd.Cp, Pd.phase
(234.26767209171211, 's')
'''
return phase_select_property(phase=self.phase, s=Mixture.Cps,
l=Mixture.Cpl, g=Mixture.Cpg, self=self)
@property
def Cpm(self):
r'''Molar heat capacity of the mixture at its current phase and
temperature, in units of [J/mol/K]. Available only if single phase.
Examples
--------
>>> Mixture(['ethylbenzene'], ws=[1], T=550, P=3E6).Cpm
294.18449553310046
'''
return phase_select_property(phase=self.phase, s=Mixture.Cpsm,
l=Mixture.Cplm, g=Mixture.Cpgm, self=self)
@property
def Vm(self):
r'''Molar volume of the mixture at its current phase and
temperature and pressure, in units of [m^3/mol].
Available only if single phase.
Examples
--------
>>> Mixture(['ethylbenzene'], ws=[1], T=550, P=3E6).Vm
0.00017758024401627633
'''
return phase_select_property(phase=self.phase, s=Mixture.Vms,
l=Mixture.Vml, g=Mixture.Vmg, self=self)
@property
def rho(self):
r'''Mass density of the mixture at its current phase and
temperature and pressure, in units of [kg/m^3].
Available only if single phase.
Examples
--------
>>> Mixture(['decane'], ws=[1], T=550, P=2E6).rho
498.67008448640604
'''
if self.phase == 'l/g':
# Volume fraction mixing rule for density
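            # a and b are the liquid and gas volumes per unit mass of mixture;
            # the weighted sum below simplifies to 1/(a + b), i.e.
            # rho = 1/((1 - x)/rhol + x/rhog).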
rhol, rhog = self.rhol, self.rhog
a, b = (1.0 - self.x)/rhol, self.x/rhog
return rhol*a/(a+b) + b/(a+b)*rhog
return phase_select_property(phase=self.phase, s=Mixture.rhos,
l=Mixture.rhol, g=Mixture.rhog, self=self)
@property
def rhom(self):
r'''Molar density of the mixture at its current phase and
temperature and pressure, in units of [mol/m^3].
Available only if single phase.
Examples
--------
>>> Mixture(['1-hexanol'], ws=[1]).rhom
7983.414573003429
'''
if self.phase == 'l/g':
# Volume fraction mixing rule for density
rholm, rhogm = self.rholm, self.rhogm
a, b = (1.0 - self.x)/rholm, self.x/rhogm
return rholm*a/(a+b) + b/(a+b)*rhogm
return phase_select_property(phase=self.phase, s=None, l=Mixture.rholm,
g=Mixture.rhogm, self=self)
@property
def Z(self):
r'''Compressibility factor of the mixture at its current phase and
temperature and pressure, [dimensionless].
Available only if single phase.
Examples
--------
>>> Mixture(['MTBE'], ws=[1], T=900, P=1E-2).Z
0.9999999999056374
'''
Vm = self.Vm
if Vm:
return Z(self.T, self.P, Vm)
return None
@property
def SG(self):
r'''Specific gravity of the mixture, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the mixture and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
        reference fluid of water at 4 °C and 1 atm, but with the liquid or
solid mixture's density at the currently specified conditions.
Examples
--------
>>> Mixture('MTBE').SG
0.7428160596603596
'''
return phase_select_property(phase=self.phase, s=Mixture.SGs,
l=Mixture.SGl, g=Mixture.SGg, self=self)
### Single-phase properties
@property
def isobaric_expansion(self):
r'''Isobaric (constant-pressure) expansion of the mixture at its
current phase, temperature, and pressure in units of [1/K].
Available only if single phase.
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
>>> Mixture(['water'], ws=[1], T=647.1, P=22048320.0).isobaric_expansion
0.34074205839222449
'''
return phase_select_property(phase=self.phase, l=Mixture.isobaric_expansion_l,
g=Mixture.isobaric_expansion_g, self=self)
@property
def isobaric_expansion_g(self):
r'''Isobaric (constant-pressure) expansion of the gas phase of the
mixture at its current temperature and pressure, in units of [1/K].
Available only if single phase.
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
>>> Mixture(['argon'], ws=[1], T=647.1, P=22048320.0).isobaric_expansion_g
0.0015661100323025273
'''
dV_dT = self.VolumeGasMixture.property_derivative_T(self.T, self.P, self.zs, self.ws)
Vm = self.Vmg
if dV_dT and Vm:
return isobaric_expansion(V=Vm, dV_dT=dV_dT)
@property
def isobaric_expansion_l(self):
r'''Isobaric (constant-pressure) expansion of the liquid phase of the
mixture at its current temperature and pressure, in units of [1/K].
Available only if single phase.
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
>>> Mixture(['argon'], ws=[1], T=647.1, P=22048320.0).isobaric_expansion_l
0.001859152875154442
'''
dV_dT = self.VolumeLiquidMixture.property_derivative_T(self.T, self.P, self.zs, self.ws)
Vm = self.Vml
if dV_dT and Vm:
return isobaric_expansion(V=Vm, dV_dT=dV_dT)
@property
def JT(self):
r'''Joule Thomson coefficient of the mixture at its
current phase, temperature, and pressure in units of [K/Pa].
Available only if single phase.
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Mixture(['water'], ws=[1]).JT
-2.2150394958666412e-07
'''
return phase_select_property(phase=self.phase, l=Mixture.JTl,
g=Mixture.JTg, self=self)
@property
def mu(self):
r'''Viscosity of the mixture at its current phase, temperature, and
pressure in units of [Pa*s].
Available only if single phase.
Examples
--------
>>> Mixture(['ethanol'], ws=[1], T=400).mu
1.1853097849748213e-05
'''
return phase_select_property(phase=self.phase, l=Mixture.mul,
g=Mixture.mug, self=self)
@property
def k(self):
r'''Thermal conductivity of the mixture at its current phase,
temperature, and pressure in units of [W/m/K].
Available only if single phase.
Examples
--------
>>> Mixture(['ethanol'], ws=[1], T=300).kl
0.16313594741877802
'''
return phase_select_property(phase=self.phase, s=None, l=Mixture.kl,
g=Mixture.kg, self=self)
@property
def nu(self):
        r'''Kinematic viscosity of the mixture at its current temperature,
pressure, and phase in units of [m^2/s].
Available only if single phase.
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Mixture(['argon'], ws=[1]).nu
1.3842643382482236e-05
'''
return phase_select_property(phase=self.phase, l=Mixture.nul,
g=Mixture.nug, self=self)
@property
def alpha(self):
r'''Thermal diffusivity of the mixture at its current temperature,
pressure, and phase in units of [m^2/s].
Available only if single phase.
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Mixture(['furfural'], ws=[1]).alpha
8.696537158635412e-08
'''
return phase_select_property(phase=self.phase, l=Mixture.alphal,
g=Mixture.alphag, self=self)
@property
def Pr(self):
r'''Prandtl number of the mixture at its current temperature,
pressure, and phase; [dimensionless].
Available only if single phase.
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Mixture(['acetone'], ws=[1]).Pr
4.183039103542711
'''
return phase_select_property(phase=self.phase, l=Mixture.Prl,
g=Mixture.Prg, self=self)
### Standard state properties
@property
def Vml_STP(self):
r'''Liquid-phase molar volume of the mixture at 298.15 K and 101.325 kPa,
and the current composition in units of [m^3/mol].
Examples
--------
>>> Mixture(['cyclobutane'], ws=[1]).Vml_STP
8.143327329133706e-05
'''
return self.VolumeLiquidMixture(T=298.15, P=101325, zs=self.zs, ws=self.ws)
@property
def Vmg_STP(self):
r'''Gas-phase molar volume of the mixture at 298.15 K and 101.325 kPa,
and the current composition in units of [m^3/mol].
Examples
--------
>>> Mixture(['nitrogen'], ws=[1]).Vmg_STP
0.02445443688838904
'''
return self.VolumeGasMixture(T=298.15, P=101325, zs=self.zs, ws=self.ws)
@property
def rhol_STP(self):
r'''Liquid-phase mass density of the mixture at 298.15 K and 101.325 kPa,
and the current composition in units of [kg/m^3].
Examples
--------
>>> Mixture(['cyclobutane'], ws=[1]).rhol_STP
688.9851989526821
'''
Vml = self.Vml_STP
if Vml:
return Vm_to_rho(Vml, self.MW)
return None
@property
def rhog_STP(self):
r'''Gas-phase mass density of the mixture at 298.15 K and 101.325 kPa,
and the current composition in units of [kg/m^3].
Examples
--------
>>> Mixture(['nitrogen'], ws=[1]).rhog_STP
1.145534453639403
'''
Vmg = self.Vmg_STP
if Vmg:
return Vm_to_rho(Vmg, self.MW)
return None
@property
def Zl_STP(self):
r'''Liquid-phase compressibility factor of the mixture at 298.15 K and 101.325 kPa,
and the current composition, [dimensionless].
Examples
--------
>>> Mixture(['cyclobutane'], ws=[1]).Zl_STP
0.0033285083663950068
'''
        Vml = self.Vml_STP
        if Vml:
            return Z(298.15, 101325, Vml)
return None
@property
def Zg_STP(self):
r'''Gas-phase compressibility factor of the mixture at 298.15 K and 101.325 kPa,
and the current composition, [dimensionless].
Examples
--------
>>> Mixture(['nitrogen'], ws=[1]).Zg_STP
0.9995520809691023
'''
        Vmg = self.Vmg_STP
        if Vmg:
            return Z(298.15, 101325, Vmg)
return None
@property
def rholm_STP(self):
r'''Molar density of the mixture in the liquid phase at 298.15 K and 101.325 kPa,
and the current composition, in units of [mol/m^3].
Examples
--------
>>> Mixture(['water'], ws=[1]).rholm_STP
55344.59086372442
'''
Vml = self.Vml_STP
if Vml:
return 1./Vml
return None
@property
def rhogm_STP(self):
r'''Molar density of the mixture in the gas phase at 298.15 K and 101.325 kPa,
and the current composition, in units of [mol/m^3].
Examples
--------
>>> Mixture(['nitrogen'], ws=[1]).rhogm_STP
40.892374850585895
'''
Vmg = self.Vmg_STP
if Vmg:
return 1./Vmg
return None
@property
def API(self):
r'''API gravity of the hypothetical liquid phase of the mixture,
[degrees]. The reference condition is water at 15.6 °C (60 °F) and 1 atm
(rho=999.016 kg/m^3, standardized).
Examples
--------
>>> Mixture(['hexane', 'decane'], ws=[0.5, 0.5]).API
71.34707841728181
'''
Vml = self.VolumeLiquidMixture(T=288.70555555555552, P=101325, zs=self.zs, ws=self.ws)
if Vml:
rho = Vm_to_rho(Vml, self.MW)
sg = SG(rho, rho_ref=999.016)
return SG_to_API(sg)
def draw_2d(self, Hs=False): # pragma: no cover
        r'''Interface for drawing a 2D image of all the molecules in the
        mixture. Requires an HTML5 browser, and the libraries RDKit and
        IPython. If RDKit is not available, a message string is returned
        instead of an image.
Parameters
----------
Hs : bool
Whether or not to show hydrogen
Examples
--------
Mixture(['natural gas']).draw_2d()
'''
try:
from rdkit.Chem import Draw
if Hs:
mols = [i.rdkitmol_Hs for i in self.Chemicals]
else:
mols = [i.rdkitmol for i in self.Chemicals]
return Draw.MolsToImage(mols)
        except ImportError:
            return 'RDKit is required for this feature.'
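    # Convenience wrappers for common dimensionless groups. Each pulls the
    # required bulk properties from the mixture at its current state, so the
    # caller supplies only the flow/geometry inputs (velocity V, length D or
    # L, wall temperature Tw).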
def Reynolds(self, V=None, D=None):
return Reynolds(V=V, D=D, rho=self.rho, mu=self.mu)
def Capillary(self, V=None):
return Capillary(V=V, mu=self.mu, sigma=self.sigma)
def Weber(self, V=None, D=None):
return Weber(V=V, L=D, rho=self.rho, sigma=self.sigma)
def Bond(self, L=None):
return Bond(rhol=self.rhol, rhog=self.rhog, sigma=self.sigma, L=L)
def Jakob(self, Tw=None):
return Jakob(Cp=self.Cp, Hvap=self.Hvap, Te=Tw-self.T)
def Grashof(self, Tw=None, L=None):
return Grashof(L=L, beta=self.isobaric_expansion, T1=Tw, T2=self.T,
rho=self.rho, mu=self.mu)
def Peclet_heat(self, V=None, D=None):
return Peclet_heat(V=V, L=D, rho=self.rho, Cp=self.Cp, k=self.k)
@property
def constants(self):
        r'''Returns a :obj:`thermo.chemical_package.ChemicalConstantsPackage`
instance with constants from the mixture, [-].
'''
try:
return self._constants
except AttributeError:
pass
from thermo.chemical_package import ChemicalConstantsPackage
self._constants = ChemicalConstantsPackage(CASs=self.CASs, names=self.names, MWs=self.MWs,
Tms=self.Tms, Tbs=self.Tbs,
# Critical state points
Tcs=self.Tcs, Pcs=self.Pcs, Vcs=self.Vcs, omegas=self.omegas,
Zcs=self.Zcs, rhocs=self.rhocms, rhocs_mass=self.rhocs,
# Phase change enthalpy
Hfus_Tms=self.Hfusms, Hfus_Tms_mass=self.Hfuss, Hvap_Tbs=self.Hvap_Tbms,
Hvap_Tbs_mass=self.Hvap_Tbs,
# Standard values
Vml_STPs=self.Vml_STPs, rhol_STPs=self.rholm_STPs, rhol_STPs_mass=self.rhol_STPs,
Vml_60Fs=self.Vml_60Fs, rhol_60Fs=self.rhoml_60Fs, rhol_60Fs_mass=self.rhol_60Fs,
# Reaction (ideal gas)
Hfgs=self.Hfgms, Hfgs_mass=self.Hfgs, Gfgs=self.Gfgms, Gfgs_mass=self.Gfgs,
Sfgs=self.Sfgms, Sfgs_mass=self.Sfgs, S0gs=self.S0gms, S0gs_mass=self.S0gs,
# Triple point
Tts=self.Tts, Pts=self.Pts, Hsub_Tts=self.Hsubms, Hsub_Tts_mass=self.Hsubs,
# Combustion
Hcs=self.Hcms, Hcs_mass=self.Hcs, Hcs_lower=self.Hcms_lower, Hcs_lower_mass=self.Hcs_lower,
# Fire safety
Tflashs=self.Tflashs, Tautoignitions=self.Tautoignitions, LFLs=self.LFLs, UFLs=self.UFLs,
# Other safety
TWAs=self.TWAs, STELs=self.STELs, Ceilings=self.Ceilings, Skins=self.Skins,
Carcinogens=self.Carcinogens, legal_statuses=self.legal_statuses, economic_statuses=self.economic_statuses,
# Environmental
GWPs=self.GWPs, ODPs=self.ODPs, logPs=self.logPs,
Psat_298s=self.Psat_298s, Hvap_298s=self.Hvapm_298s,
Hvap_298s_mass=self.Hvap_298s, Vml_Tms=self.Vml_Tms,
rhos_Tms=self.rhoms_Tm, rhos_Tms_mass=self.rhos_Tms, Vms_Tms=self.Vms_Tms,
# Analytical
RIs=self.RIs, RI_Ts=self.RI_Ts, conductivities=self.conductivities,
conductivity_Ts=self.conductivity_Ts,
# Odd constants
charges=self.charges, dipoles=self.dipoles, Stockmayers=self.Stockmayers,
molecular_diameters=self.molecular_diameters, Van_der_Waals_volumes=self.Van_der_Waals_volumes,
Van_der_Waals_areas=self.Van_der_Waals_areas, Parachors=self.Parachors, StielPolars=self.StielPolars,
atomss=self.atomss, atom_fractions=self.atom_fractionss,
similarity_variables=self.similarity_variables, phase_STPs=self.phase_STPs,
UNIFAC_Rs=self.UNIFAC_Rs, UNIFAC_Qs=self.UNIFAC_Qs, solubility_parameters=self.solubility_parameters_STP,
# Other identifiers
PubChems=self.PubChems, formulas=self.formulas, smiless=self.smiless, InChIs=self.InChIs,
InChI_Keys=self.InChI_Keys,
# Groups
UNIFAC_groups=self.UNIFAC_groups, UNIFAC_Dortmund_groups=self.UNIFAC_Dortmund_groups,
PSRK_groups=self.PSRK_groups)
return self._constants
def properties(self, copy_pures=True, copy_mixtures=True):
try:
return self._properties
except AttributeError:
pass
from thermo.chemical_package import PropertyCorrelationsPackage
constants = self.constants
kwargs = dict(constants=constants)
if copy_pures:
kwargs.update(VaporPressures=self.VaporPressures, SublimationPressures=self.SublimationPressures,
VolumeGases=self.VolumeGases, VolumeLiquids=self.VolumeLiquids, VolumeSolids=self.VolumeSolids,
HeatCapacityGases=self.HeatCapacityGases, HeatCapacityLiquids=self.HeatCapacityLiquids,
HeatCapacitySolids=self.HeatCapacitySolids,
ViscosityGases=self.ViscosityGases, ViscosityLiquids=self.ViscosityLiquids,
ThermalConductivityGases=self.ThermalConductivityGases, ThermalConductivityLiquids=self.ThermalConductivityLiquids,
EnthalpyVaporizations=self.EnthalpyVaporizations, EnthalpySublimations=self.EnthalpySublimations,
SurfaceTensions=self.SurfaceTensions, PermittivityLiquids=self.Permittivities)
if copy_mixtures:
kwargs.update(VolumeGasMixtureObj=self.VolumeGasMixture, VolumeLiquidMixtureObj=self.VolumeLiquidMixture,
VolumeSolidMixtureObj=self.VolumeSolidMixture,
HeatCapacityGasMixtureObj=self.HeatCapacityGasMixture,
HeatCapacityLiquidMixtureObj=self.HeatCapacityLiquidMixture,
HeatCapacitySolidMixtureObj=self.HeatCapacitySolidMixture,
ViscosityGasMixtureObj=self.ViscosityGasMixture,
ViscosityLiquidMixtureObj=self.ViscosityLiquidMixture,
ThermalConductivityGasMixtureObj=self.ThermalConductivityGasMixture,
ThermalConductivityLiquidMixtureObj=self.ThermalConductivityLiquidMixture,
SurfaceTensionMixtureObj=self.SurfaceTensionMixture)
self._properties = PropertyCorrelationsPackage(**kwargs)
return self._properties
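# Usage sketch for the two cached packages defined above (attribute names as
# in this class; actual values depend on the available data files):
#   m = Mixture(['water', 'ethanol'], ws=[0.5, 0.5], T=320.0)
#   consts = m.constants           # cached ChemicalConstantsPackage
#   correlations = m.properties()  # cached PropertyCorrelationsPackage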
|
7bf3384107a08222429612e930cb4fa0bfdc82f8
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/cherry-pickup.py
|
c74f6c4ca9505b0f48f4173eef02c317ce2c7dfe
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
cherry-pickup.py
|
# Time: O(n^3)
# Space: O(n^2)
class Solution(object):
def cherryPickup(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
# dp holds the max # of cherries two k-length paths can pick up.
# The two k-length paths arrive at (i, k - i) and (j, k - j),
# respectively.
n = len(grid)
dp = [[-1 for _ in xrange(n)] for _ in xrange(n)]
dp[0][0] = grid[0][0]
max_len = 2 * (n-1)
directions = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
for k in xrange(1, max_len+1):
for i in reversed(xrange(max(0, k-n+1), min(k+1, n))): # 0 <= i < n, 0 <= k-i < n
for j in reversed(xrange(i, min(k+1, n))): # i <= j < n, 0 <= k-j < n
if grid[i][k-i] == -1 or grid[j][k-j] == -1:
dp[i][j] = -1
continue
cnt = grid[i][k-i]
if i != j:
cnt += grid[j][k-j]
max_cnt = -1
for direction in directions:
ii, jj = i+direction[0], j+direction[1]
if ii >= 0 and jj >= 0 and dp[ii][jj] >= 0:
max_cnt = max(max_cnt, dp[ii][jj]+cnt)
dp[i][j] = max_cnt
return max(dp[n-1][n-1], 0)
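# Quick check sketch (Python 2, matching the xrange usage above; grid from
# the classic problem statement, expected answer 5):
#   print(Solution().cherryPickup([[0, 1, -1],
#                                  [1, 0, -1],
#                                  [1, 1, 1]]))  # -> 5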
|
b50f9e2ce5b9abce71598250d5753b3c579d7a30
|
19da539e0174f9139477c2105fb3ba75d4b3437e
|
/test.py
|
e6055d3c784791e053ef125a2eba53ccbdf03eee
|
[] |
no_license
|
tg12/FAIG
|
b9d828960e8af1411b5a8303957bc11bed9f4038
|
dfa18b0137419f7018c3cf97db173300cb30f658
|
refs/heads/master
| 2023-07-11T00:39:46.613092
| 2023-06-21T17:43:06
| 2023-06-21T17:43:06
| 117,366,959
| 158
| 78
| null | 2023-06-21T17:43:07
| 2018-01-13T18:16:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
test.py
|
'''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk
# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB
# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu
# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd
# contact :- github@jamessawyer.co.uk
"""This is to test modules."""
import json
import configparser
from igclient import IGClient
from apps.market_watcher import MarketWatcher
config = configparser.ConfigParser()
config.read("config.conf")
client = IGClient()
client.session()
def main():
epics = json.loads(config["Epics"]["EPIC_IDS"])
watcher = MarketWatcher(client=client, epics=epics)
watcher.watch()
if __name__ == "__main__":
main()
|
f047415307b42f58dac6c54ced69b6b215084316
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/test/unit/app/tools/test_tool_deserialization.py
|
e7ce9f8cdf085e10cb2e93569b977d164bef56c8
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
test_tool_deserialization.py
|
import pytest
from galaxy.model.unittest_utils import GalaxyDataTestApp
from galaxy.tool_util.parser import get_tool_source
from galaxy.tool_util.parser.cwl import CwlToolSource
from galaxy.tools import create_tool_from_source
XML_TOOL = """
<tool id="tool_id" name="xml tool" version="1"/>
"""
CWL_TOOL = """
cwlVersion: v1.0
class: CommandLineTool
baseCommand: echo
inputs:
message:
type: string
inputBinding:
position: 1
outputs: []
"""
YAML_TOOL = """
id: simple_constructs_y
name: simple_constructs_y
version: 1.0
command:
>
echo "$booltest" >> $out_file1;
inputs:
- name: booltest
type: boolean
truevalue: booltrue
falsevalue: boolfalse
checked: false
outputs:
out_file1:
format: txt
"""
class ToolApp(GalaxyDataTestApp):
name = "galaxy"
biotools_metadata_source = None
job_search = None
is_webapp = True
@pytest.fixture
def tool_app():
return ToolApp()
def _deserialize(app, tool_source_class, raw_tool_source):
tool_source = get_tool_source(tool_source_class=tool_source_class, raw_tool_source=raw_tool_source)
assert type(tool_source).__name__ == tool_source_class
return create_tool_from_source(app, tool_source=tool_source)
def test_deserialize_xml_tool(tool_app):
tool = _deserialize(tool_app, tool_source_class="XmlToolSource", raw_tool_source=XML_TOOL)
assert tool.id == "tool_id"
assert tool.name == "xml tool"
def test_deserialize_yaml_tool(tool_app):
tool = _deserialize(tool_app, tool_source_class="YamlToolSource", raw_tool_source=YAML_TOOL)
assert tool.id == "simple_constructs_y"
assert tool.name == "simple_constructs_y"
def test_deserialize_cwl_tool(tool_app):
# Can't verify much about cwl tools at this point
tool_source = get_tool_source(tool_app, tool_source_class="CwlToolSource", raw_tool_source=CWL_TOOL)
assert isinstance(tool_source, CwlToolSource)
|
c519c5e810b792d3b21c9aa75532d8c060623ad1
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/tools/generate_formula_api.py
|
8cc08c3f5ed0dffbf62724227b17f098240910d9
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
generate_formula_api.py
|
#!/usr/bin/env python
"""
This will generate an API file for formula in dir/statsmodels/formula/api.py
It first builds statsmodels in place, then generates the file. It's to be run
by developers to add files to the formula API without having to maintain this
by hand.
usage
generate_formula_api /home/skipper/statsmodels/statsmodels/
"""
import os
import sys
def iter_subclasses(cls, _seen=None, template_classes=[]):
"""
Generator to iterate over all the subclasses of Model. Based on
http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
Yields class
"""
if not isinstance(cls, type):
raise TypeError(
"itersubclasses must be called with "
"new-style classes, not %.100r" % cls
)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen and sub.__name__ not in template_classes:
_seen.add(sub)
# we do not want to yield the templates, but we do want to
# recurse on them
yield sub
for sub in iter_subclasses(sub, _seen, template_classes):
yield sub
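# Usage sketch (hypothetical toy hierarchy, not part of statsmodels):
#   class A(object): pass
#   class B(A): pass
#   list(iter_subclasses(A))   # -> [<class 'B'>]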
def write_formula_api(directory):
template_classes = [
"DiscreteModel",
"BinaryModel",
"MultinomialModel",
"OrderedModel",
"CountModel",
"LikelihoodModel",
"GenericLikelihoodModel",
"TimeSeriesModel",
# this class should really be deleted
"ARIMAProcess",
# these need some more work, so do not expose them
"ARIMA",
"VAR",
"SVAR",
"AR",
"NBin",
"NbReg",
"ARMA",
]
path = os.path.join(directory, "statsmodels", "formula", "api.py")
fout = open(path, "w", encoding="utf-8")
for model in iter_subclasses(Model, template_classes=template_classes):
print("Generating API for %s" % model.__name__)
fout.write(
"from " + model.__module__ + " import " + model.__name__ + "\n"
)
fout.write(
model.__name__.lower() + " = " + model.__name__ + ".from_formula\n"
)
fout.close()
if __name__ == "__main__":
import statsmodels.api as sm
print(
"Generating formula API for statsmodels version %s"
% sm.version.full_version
)
directory = sys.argv[1]
cur_dir = os.path.dirname(__file__)
os.chdir(directory)
# statsmodels must be importable so the whole Model subclass chain is registered
from statsmodels.base.model import Model
write_formula_api(directory)
|
edf12d65ee5503845364c70a5504e2d522c6696a
|
3f02defc58172182848e8d9a609ae69472abd899
|
/utils/threat-mvp/mail.py
|
91db2d4e089326f0167c764100ed01d0a16a42d4
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OWASP/threat-dragon
|
d50a6aeb1a3620308b0ce966cb2eab698b0a31d1
|
3419ec962ac1f9686245da103465be053173f4c1
|
refs/heads/main
| 2023-09-01T16:51:38.811499
| 2023-08-28T15:21:00
| 2023-08-28T15:21:00
| 268,796,991
| 590
| 196
|
Apache-2.0
| 2023-09-12T12:36:53
| 2020-06-02T12:37:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
mail.py
|
# Local SMTP debugging server
# python -m smtpd -c DebuggingServer -n localhost:1025
import logger as log
import smtplib
from socket import gaierror
# Read Config file
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
def sendErrorEmail(subject, body):
try:
receiver = ""
sendEmail(subject, body, receiver)
except Exception as e:
log.logger.info('Sending the email failed.')
def sendEmail(subject, body, receiver):
try:
emailEnable = config['email']['sendingEmails']
sendingEmails = str(emailEnable).lower() in ("yes", "true")
except Exception as e:
sendingEmails = False
try:
if sendingEmails:
port = config['email']['port']
smtp_server = config['email']['smtp_server']
login = config['email']['login']
password = config['email']['password']
sender = config['email']['sender']
if len(receiver) == 0:
receiver = sender
# The subject, to and from emails, the message body
message = f"""\
Subject: {subject}
To: {receiver}
From: {sender}
{body}"""
# Send the email using the credentials
try:
with smtplib.SMTP(smtp_server, port) as server:
server.sendmail(sender, receiver, message)
except (gaierror, ConnectionRefusedError):
log.logger.error("Failed to connect to the server. Bad connection settings")
except smtplib.SMTPServerDisconnected:
log.logger.error("Failed to connect to the server")
except smtplib.SMTPException as e:
log.logger.error(f"SMTP error occurred: {e}")
else:
log.logger.info("The email was sent successfully")
else:
log.logger.info('The sending of emails is disabled.')
except Exception as e:
log.logger.error(f'Sending the email failed: {e}')
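# Usage sketch (assumes a config.ini with an [email] section and a local
# debugging server started as shown in the header comment; the address is
# hypothetical):
#   sendEmail("Test subject", "Hello from mail.py", "dev@example.com")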
|
d70dce075b04a2ed431f328d412cdb87278ffb3a
|
80d505489f5354d4b29156d6eea7e3516162bcc7
|
/exercises/concept/plane-tickets/plane_tickets.py
|
4399791b2235cf53de6094366dbfeb29f9d8de9d
|
[
"Python-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
exercism/python
|
419e89690070eef42fc4c932faa0df0706d5c222
|
1e71b8a00c8b34c251d785f0a10843efc5234994
|
refs/heads/main
| 2023-08-29T03:18:02.845245
| 2023-08-25T12:50:16
| 2023-08-25T12:50:16
| 17,274,389
| 1,588
| 1,513
|
MIT
| 2023-09-14T20:33:13
| 2014-02-28T03:48:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
plane_tickets.py
|
"""Functions to automate Conda airlines ticketing system."""
def generate_seat_letters(amount):
""" Generate a series of seat letters for airline boarding.
:param amount: int - amount of seat letters to be generated.
:return: generator - generator that yields seat letters.
Seat letters are generated from A to D.
After D it should start again with A.
Example: A, B, C, D
"""
pass
def generate_seats(amount):
""" Generate a series of seat numbers for airline boarding.
:param amount: int - Amount of seats to be generated.
:return: generator - generator that yields seat numbers.
There should be no row 13
Seat numbers are generated with each row having 4 seats.
These should be sorted from low to high.
Example: 3C, 3D, 4A, 4B
"""
pass
def assign_seats(passengers):
""" Assign seats to passengers.
:param passengers: list[str] - A list of strings containing names of passengers.
:return: dict - with the names of the passengers as keys and seat numbers as values.
Example output: {"Foo": "1A", "Bar": "1B"}
"""
pass
def generate_codes(seat_numbers, flight_id):
"""Generate codes for a ticket.
:param seat_numbers: list[str] - list of seat numbers.
:param flight_id: str - string containing the flight identification.
:return: generator - generator that yields 12 character long strings.
"""
|
1476940f1e8ea369c603ed7be442359e17553fdf
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/fpdf2/fpdf/fonts.pyi
|
5a894fff627819cff9650fdc84e5a3482ce02be9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 58
|
pyi
|
fonts.pyi
|
from typing import Any
courier: Any
fpdf_charwidths: Any
|
c47b15212ff7663533da1dfe93f1d5cb08f3f3bb
|
a46bc4d0750de9c99b86ab45fd2e81eb18362a56
|
/scripts/process_json.py
|
e2069872e418e9540637eab709582b1c01b82486
|
[] |
no_license
|
codebling/vs-code-default-keybindings
|
e11e8c06855abefd520bbd293575d5ce5ba02ae8
|
aa3817cbcf50c8f0cf306c74b6129d3cb3e48ada
|
refs/heads/master
| 2023-08-21T23:52:14.599573
| 2023-08-11T22:14:07
| 2023-08-11T22:14:07
| 186,748,839
| 166
| 90
| null | 2023-09-14T17:29:47
| 2019-05-15T04:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
process_json.py
|
#!/usr/bin/env python3
'''
you don't need to run this script,
unless you'd like to update the files here with the latest
version of VSCode.
usage:
1) install and open the latest VSCode
2) press Ctrl-Shift-P or Cmd-Shift-P to open 'quick start'
3) type 'Open Default Keyboard Shortcuts (JSON)' into the box and press Enter
4) copy and paste the resulting json file into
scripts/linux.keybindings.raw.json
scripts/windows.keybindings.raw.json
scripts/macos.keybindings.raw.json
, based on your OS
5) run this script; each raw file that exists is processed
automatically. For example, in a terminal, run
cd scripts
python3 process_json.py
with any recent version of Python.
the script will replace/generate files like
linux.keybindings.json
linux.negative.keybindings.json
in the parent directory.
'''
import re
import os
def makeCommandsNegatives(s):
# it would be less fragile to actually parse the json,
# but this should be good enough for now because even if a string
# like this were to occur in a command, the quotes would be escaped.
# note: allow any number of spaces after the ".
sNegative = re.sub(r'(", *")command": "', r'\1command": "-', s)
if s == sNegative:
print('Warning: no commands found to make negative.')
return sNegative
def finishingTouches(filename, osname):
with open(filename, 'rb') as fIn:
b = fIn.read()
# fix line-endings
if osname == 'windows':
b = b.replace(b'\r\n', b'\n').replace(b'\n', b'\r\n')
else:
b = b.replace(b'\r\n', b'\n')
# for macos, indent everything but the opening [ by 4 spaces
if osname == 'macos':
b = b.replace(b'\n', b'\n ')
b = b.replace(b'\n [\n', b'\n[\n')
# for windows, indent everything but the opening [ by 2 spaces
if osname == 'windows':
b = b.replace(b'\r\n', b'\r\n ')
b = b.replace(b'\r\n [\r\n', b'\r\n[\r\n')
# for linux, indent everything but the opening [ by 2 spaces
if osname == 'linux' :
b = b.replace(b'\n', b'\n ')
b = b.replace(b'\n [\n', b'\n[\n')
# remove unnecessary line
b = b.replace(b'// Override key bindings by placing them into your key bindings file.', b'')
# remove list of available commands
b = b.split(b'// Here are other available commands:')[0]
with open(filename, 'wb') as fOut:
fOut.write(b)
def processRawFile(inputFile):
if not os.path.isfile(inputFile):
print('Not found: ' + inputFile)
return
osname = inputFile.split('.')[0]
if osname not in ['linux', 'windows', 'macos']:
print('Expected the filename to start with linux, windows, or macos')
return
with open(inputFile, encoding='utf-8') as fIn:
s = fIn.read()
# get rid of opening/closing whitespace
s = s.strip()
outputFile = '../' + osname + '.keybindings.json'
with open(outputFile, 'w', encoding='utf-8') as fOut:
fOut.write(s)
outputNegFile = '../' + osname + '.negative.keybindings.json'
with open(outputNegFile, 'w', encoding='utf-8') as fOut:
fOut.write(makeCommandsNegatives(s))
finishingTouches(outputFile, osname)
finishingTouches(outputNegFile, osname)
print('Wrote to ' + outputFile)
print('Wrote to ' + outputNegFile)
if __name__ == '__main__':
print('Processing json')
if os.path.exists('linux.keybindings.raw.json'):
processRawFile('linux.keybindings.raw.json')
if os.path.exists('windows.keybindings.raw.json'):
processRawFile('windows.keybindings.raw.json')
if os.path.exists('macos.keybindings.raw.json'):
processRawFile('macos.keybindings.raw.json')
print('Done')
|
d87e9a27c80892bd847bc0dd5fe23b33e8d9e202
|
d010607a23cd158210ba9710c3e9d2ab1166600b
|
/tests/model_selection/test_incremental_warns.py
|
d03f0a918f626dc421134fe8d11d59a8fedb2545
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
dask/dask-ml
|
b7ce11e8af2b22d33957ac120b7d1fe597da2c2d
|
b5640cbb913954a227585cae413d89d6b48f4c0f
|
refs/heads/main
| 2023-08-19T22:23:56.835722
| 2023-03-24T22:04:13
| 2023-03-24T22:04:13
| 94,455,745
| 883
| 278
|
BSD-3-Clause
| 2023-06-27T23:50:02
| 2017-06-15T15:56:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
test_incremental_warns.py
|
import numpy as np
import pytest
from distributed.utils_test import gen_cluster
from dask_ml.datasets import make_classification
from dask_ml.model_selection import IncrementalSearchCV, InverseDecaySearchCV
from dask_ml.utils import ConstantFunction
@gen_cluster(client=True)
async def test_warns_decay_rate(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=10)
params = {"value": np.random.RandomState(42).rand(1000)}
model = ConstantFunction()
kwargs = dict(max_iter=5, n_initial_parameters=5)
search = IncrementalSearchCV(model, params, **kwargs)
match = r"deprecated since Dask-ML v1.4.0."
with pytest.warns(FutureWarning, match=match):
await search.fit(X, y)
# Make sure the printed warning message works
search = IncrementalSearchCV(model, params, decay_rate=None, **kwargs)
await search.fit(X, y)
@gen_cluster(client=True)
async def test_warns_decay_rate_wanted(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=10)
params = {"value": np.random.RandomState(42).rand(1000)}
model = ConstantFunction()
search = IncrementalSearchCV(
model, params, max_iter=5, n_initial_parameters=5, decay_rate=1
)
match = "decay_rate is deprecated .* Use InverseDecaySearchCV"
with pytest.warns(FutureWarning, match=match):
await search.fit(X, y)
# Make sure old behavior is retained w/o warning
search = InverseDecaySearchCV(model, params, decay_rate=1)
await search.fit(X, y)
|
10a71577a5127a3fee9b9def9d044c0e7ebb1cc5
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/qt__pyqt__pyside__pyqode/QLineEdit__mask.py
|
12908ac7ef7ba9f0b92a0b7973210d3c50d51399
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 893
|
py
|
QLineEdit__mask.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
# SOURCE: https://doc.qt.io/qt-5/qlineedit.html#inputMask-prop
from PyQt5.QtWidgets import QApplication, QLineEdit, QWidget, QFormLayout
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.mask_qt = QLineEdit("+7([000])[000]-[0000]")
self.mask_qt.textEdited.connect(self._on_changed_mask)
self.test_input = QLineEdit("9991239999")
layout = QFormLayout()
layout.addRow("Qt mask:", self.mask_qt)
layout.addRow("Test", self.test_input)
self.setLayout(layout)
def _on_changed_mask(self, mask):
text = self.test_input.text()
self.test_input.setInputMask(mask)
self.test_input.setText(text)
if __name__ == "__main__":
app = QApplication([])
mw = MainWindow()
mw.show()
app.exec()
|
d269606c5b2e0db290a7d18e8ce563307bee4a95
|
e2ae9cf5244150a4033bb4af9925b9335a756bf1
|
/lectures/demos/_cached_function.py
|
80349bb84bb05b8b38d2b7f26c6b2b2f09955587
|
[
"MIT"
] |
permissive
|
PredictiveScienceLab/uq-course
|
7ebe2ed316e8e26e5ebdab1dbffd1d52a1d5222d
|
10d937ccd3bcc10e57fe3653f6fe3d49076b1839
|
refs/heads/master
| 2022-04-29T11:10:14.630384
| 2022-03-30T19:54:50
| 2022-03-30T19:54:50
| 115,515,599
| 236
| 106
|
MIT
| 2020-02-08T19:25:38
| 2017-12-27T11:45:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
_cached_function.py
|
"""
Implementation of a cached function of a numpy array input.
Author:
Ilias Bilionis
Date:
6/6/2014
"""
__all__ = ['CachedFunction']
from . import Cache
from . import NumpyArrayCache
class CachedFunction(object):
"""
A class representing a cached function.
"""
# The input cache
_input_cache = None
# The output cache
_output_cache = None
# The underlying function
_f = None
# The object that implements the function (if any)
_obj = None
def __init__(self, f,
input_cache_type=NumpyArrayCache,
input_cache_args={'name': 'Input Cache'},
output_cache_type=NumpyArrayCache,
output_cache_args={'name': 'Output Cache'}):
"""
Initialize the object.
"""
self._count = 0
self._count_eval = 0
self._f = f
self._input_cache = input_cache_type(**input_cache_args)
self._output_cache = output_cache_type(**output_cache_args)
def __get__(self, obj, type=None):
return self.__class__(self._f.__get__(obj, type))
def __call__(self, *args, **kw):
"""
Call the function at x.
"""
x = args[0]
# Look for x in the cache
i = self._input_cache.get_index_of(x)
self._count += 1
if i == -1:
# Not found in cache, so evaluate
y = self._f(*args, **kw)
self._count_eval += 1
self._input_cache.append(x)
self._output_cache.append(y)
else:
# Found in cache, recover
y = self._output_cache[i]
return y
def __str__(self):
"""
Return a string representation of the object.
"""
s = 'Cached function:\n'
s += ('Evaluations = ' + str(self._count) +
' (' + str(self._count_eval) + ' actual)')
return s
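# Usage sketch (assumes the sibling NumpyArrayCache default works for plain
# numpy arrays; the counts shown follow the __str__ format above):
#   import numpy as np
#   f = CachedFunction(lambda x: np.sum(x ** 2))
#   x = np.ones(3)
#   f(x); f(x)    # the second call is served from the output cache
#   print(f)      # Evaluations = 2 (1 actual)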
|
30cfc586f2e91c811d63899115361ee1c9b6ea5e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/MybankCreditLoantradePayArSignModel.py
|
a1b1a4cfc0b9d442f04ee87e5ecfb2ab735cbce6
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,540
|
py
|
MybankCreditLoantradePayArSignModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditLoantradePayArSignModel(object):
def __init__(self):
self._alipay_open_id = None
self._alipay_user_id = None
self._biz_scene = None
self._sign_param = None
self._site = None
self._site_open_id = None
self._site_user_id = None
self._sub_biz_scene = None
@property
def alipay_open_id(self):
return self._alipay_open_id
@alipay_open_id.setter
def alipay_open_id(self, value):
self._alipay_open_id = value
@property
def alipay_user_id(self):
return self._alipay_user_id
@alipay_user_id.setter
def alipay_user_id(self, value):
self._alipay_user_id = value
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def sign_param(self):
return self._sign_param
@sign_param.setter
def sign_param(self, value):
self._sign_param = value
@property
def site(self):
return self._site
@site.setter
def site(self, value):
self._site = value
@property
def site_open_id(self):
return self._site_open_id
@site_open_id.setter
def site_open_id(self, value):
self._site_open_id = value
@property
def site_user_id(self):
return self._site_user_id
@site_user_id.setter
def site_user_id(self, value):
self._site_user_id = value
@property
def sub_biz_scene(self):
return self._sub_biz_scene
@sub_biz_scene.setter
def sub_biz_scene(self, value):
self._sub_biz_scene = value
def to_alipay_dict(self):
params = dict()
if self.alipay_open_id:
if hasattr(self.alipay_open_id, 'to_alipay_dict'):
params['alipay_open_id'] = self.alipay_open_id.to_alipay_dict()
else:
params['alipay_open_id'] = self.alipay_open_id
if self.alipay_user_id:
if hasattr(self.alipay_user_id, 'to_alipay_dict'):
params['alipay_user_id'] = self.alipay_user_id.to_alipay_dict()
else:
params['alipay_user_id'] = self.alipay_user_id
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.sign_param:
if hasattr(self.sign_param, 'to_alipay_dict'):
params['sign_param'] = self.sign_param.to_alipay_dict()
else:
params['sign_param'] = self.sign_param
if self.site:
if hasattr(self.site, 'to_alipay_dict'):
params['site'] = self.site.to_alipay_dict()
else:
params['site'] = self.site
if self.site_open_id:
if hasattr(self.site_open_id, 'to_alipay_dict'):
params['site_open_id'] = self.site_open_id.to_alipay_dict()
else:
params['site_open_id'] = self.site_open_id
if self.site_user_id:
if hasattr(self.site_user_id, 'to_alipay_dict'):
params['site_user_id'] = self.site_user_id.to_alipay_dict()
else:
params['site_user_id'] = self.site_user_id
if self.sub_biz_scene:
if hasattr(self.sub_biz_scene, 'to_alipay_dict'):
params['sub_biz_scene'] = self.sub_biz_scene.to_alipay_dict()
else:
params['sub_biz_scene'] = self.sub_biz_scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditLoantradePayArSignModel()
if 'alipay_open_id' in d:
o.alipay_open_id = d['alipay_open_id']
if 'alipay_user_id' in d:
o.alipay_user_id = d['alipay_user_id']
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'sign_param' in d:
o.sign_param = d['sign_param']
if 'site' in d:
o.site = d['site']
if 'site_open_id' in d:
o.site_open_id = d['site_open_id']
if 'site_user_id' in d:
o.site_user_id = d['site_user_id']
if 'sub_biz_scene' in d:
o.sub_biz_scene = d['sub_biz_scene']
return o
|
5bbccd53a5478b80a857a9fb97d04ba53a17813b
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd/src/dbnd/_vendor/dulwich/lru_cache.py
|
821da5b804ca28eb5d8e795d6ee9cf404450a838
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 14,466
|
py
|
lru_cache.py
|
# lru_cache.py -- Simple LRU cache for dulwich
# Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""A simple least-recently-used (LRU) cache."""
_null_key = object()
class _LRUNode(object):
"""This maintains the linked-list which is the lru internals."""
__slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')
def __init__(self, key, value, cleanup=None):
self.prev = None
self.next_key = _null_key
self.key = key
self.value = value
self.cleanup = cleanup
# TODO: We could compute this 'on-the-fly' like we used to, and remove
# one pointer from this object, we just need to decide if it
# actually costs us much of anything in normal usage
self.size = None
def __repr__(self):
if self.prev is None:
prev_key = None
else:
prev_key = self.prev.key
return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
self.next_key, prev_key)
def run_cleanup(self):
if self.cleanup is not None:
self.cleanup(self.key, self.value)
self.cleanup = None
# Just make sure to break any refcycles, etc
self.value = None
class LRUCache(object):
"""A class which manages a cache of entries, removing unused ones."""
def __init__(self, max_cache=100, after_cleanup_count=None):
self._cache = {}
# The "HEAD" of the lru linked list
self._most_recently_used = None
# The "TAIL" of the lru linked list
self._least_recently_used = None
self._update_max_cache(max_cache, after_cleanup_count)
def __contains__(self, key):
return key in self._cache
def __getitem__(self, key):
cache = self._cache
node = cache[key]
# Inlined from _record_access to decrease the overhead of __getitem__
# We also have more knowledge about structure if __getitem__ is
# succeeding, then we know that self._most_recently_used must not be
# None, etc.
mru = self._most_recently_used
if node is mru:
# Nothing to do, this node is already at the head of the queue
return node.value
# Remove this node from the old location
node_prev = node.prev
next_key = node.next_key
# benchmarking shows that the lookup of _null_key in globals is faster
# than the attribute lookup for (node is self._least_recently_used)
if next_key is _null_key:
# 'node' is the _least_recently_used, because it doesn't have a
# 'next' item. So move the current lru to the previous node.
self._least_recently_used = node_prev
else:
node_next = cache[next_key]
node_next.prev = node_prev
node_prev.next_key = next_key
# Insert this node at the front of the list
node.next_key = mru.key
mru.prev = node
self._most_recently_used = node
node.prev = None
return node.value
def __len__(self):
return len(self._cache)
def _walk_lru(self):
"""Walk the LRU list, only meant to be used in tests."""
node = self._most_recently_used
if node is not None:
if node.prev is not None:
raise AssertionError('the _most_recently_used entry is not'
' supposed to have a previous entry'
' %s' % (node,))
while node is not None:
if node.next_key is _null_key:
if node is not self._least_recently_used:
raise AssertionError('only the last node should have'
' no next value: %s' % (node,))
node_next = None
else:
node_next = self._cache[node.next_key]
if node_next.prev is not node:
raise AssertionError('inconsistency found, node.next.prev'
' != node: %s' % (node,))
if node.prev is None:
if node is not self._most_recently_used:
raise AssertionError('only the _most_recently_used should'
' not have a previous node: %s'
% (node,))
else:
if node.prev.next_key != node.key:
raise AssertionError('inconsistency found, node.prev.next'
' != node: %s' % (node,))
yield node
node = node_next
def add(self, key, value, cleanup=None):
"""Add a new value to the cache.
Also, if the entry is ever removed from the cache, call
cleanup(key, value).
:param key: The key to store it under
:param value: The object to store
:param cleanup: None or a function taking (key, value) to indicate
'value' should be cleaned up.
"""
if key is _null_key:
raise ValueError('cannot use _null_key as a key')
if key in self._cache:
node = self._cache[key]
node.run_cleanup()
node.value = value
node.cleanup = cleanup
else:
node = _LRUNode(key, value, cleanup=cleanup)
self._cache[key] = node
self._record_access(node)
if len(self._cache) > self._max_cache:
# Trigger the cleanup
self.cleanup()
def cache_size(self):
"""Get the number of entries we will cache."""
return self._max_cache
def get(self, key, default=None):
node = self._cache.get(key, None)
if node is None:
return default
self._record_access(node)
return node.value
def keys(self):
"""Get the list of keys currently cached.
Note that values returned here may not be available by the time you
request them later. This is simply meant as a peek into the current
state.
:return: An unordered list of keys that are currently cached.
"""
return self._cache.keys()
def items(self):
"""Get the key:value pairs as a dict."""
return dict((k, n.value) for k, n in self._cache.items())
def cleanup(self):
"""Clear the cache until it shrinks to the requested size.
This does not completely wipe the cache, just makes sure it is under
the after_cleanup_count.
"""
# Make sure the cache is shrunk to the correct size
while len(self._cache) > self._after_cleanup_count:
self._remove_lru()
def __setitem__(self, key, value):
"""Add a value to the cache, there will be no cleanup function."""
self.add(key, value, cleanup=None)
def _record_access(self, node):
"""Record that key was accessed."""
# Move 'node' to the front of the queue
if self._most_recently_used is None:
self._most_recently_used = node
self._least_recently_used = node
return
elif node is self._most_recently_used:
# Nothing to do, this node is already at the head of the queue
return
# We've taken care of the tail pointer, remove the node, and insert it
# at the front
# REMOVE
if node is self._least_recently_used:
self._least_recently_used = node.prev
if node.prev is not None:
node.prev.next_key = node.next_key
if node.next_key is not _null_key:
node_next = self._cache[node.next_key]
node_next.prev = node.prev
# INSERT
node.next_key = self._most_recently_used.key
self._most_recently_used.prev = node
self._most_recently_used = node
node.prev = None
def _remove_node(self, node):
if node is self._least_recently_used:
self._least_recently_used = node.prev
self._cache.pop(node.key)
# If we have removed all entries, remove the head pointer as well
if self._least_recently_used is None:
self._most_recently_used = None
node.run_cleanup()
# Now remove this node from the linked list
if node.prev is not None:
node.prev.next_key = node.next_key
if node.next_key is not _null_key:
node_next = self._cache[node.next_key]
node_next.prev = node.prev
# And remove this node's pointers
node.prev = None
node.next_key = _null_key
def _remove_lru(self):
"""Remove one entry from the lru, and handle consequences.
If there are no more references to the lru, then this entry should be
removed from the cache.
"""
self._remove_node(self._least_recently_used)
def clear(self):
"""Clear out all of the cache."""
# Clean up in LRU order
while self._cache:
self._remove_lru()
def resize(self, max_cache, after_cleanup_count=None):
"""Change the number of entries that will be cached."""
self._update_max_cache(max_cache,
after_cleanup_count=after_cleanup_count)
def _update_max_cache(self, max_cache, after_cleanup_count=None):
self._max_cache = max_cache
if after_cleanup_count is None:
self._after_cleanup_count = self._max_cache * 8 // 10
else:
self._after_cleanup_count = min(after_cleanup_count,
self._max_cache)
self.cleanup()
class LRUSizeCache(LRUCache):
"""An LRUCache that removes things based on the size of the values.
This differs in that it doesn't care how many actual items there are,
it just restricts the cache to be cleaned up after so much data is stored.
The size of items added will be computed using compute_size(value), which
defaults to len() if not supplied.
"""
def __init__(self, max_size=1024*1024, after_cleanup_size=None,
compute_size=None):
"""Create a new LRUSizeCache.
:param max_size: The max number of bytes to store before we start
clearing out entries.
:param after_cleanup_size: After cleaning up, shrink everything to this
size.
:param compute_size: A function to compute the size of the values. We
use a function here, so that you can pass 'len' if you are just
using simple strings, or a more complex function if you are using
something like a list of strings, or even a custom object.
The function should take the form "compute_size(value) => integer".
If not supplied, it defaults to 'len()'
"""
self._value_size = 0
self._compute_size = compute_size
if compute_size is None:
self._compute_size = len
self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
LRUCache.__init__(self, max_cache=max(int(max_size/512), 1))
def add(self, key, value, cleanup=None):
"""Add a new value to the cache.
Also, if the entry is ever removed from the cache, call
cleanup(key, value).
:param key: The key to store it under
:param value: The object to store
:param cleanup: None or a function taking (key, value) to indicate
'value' should be cleaned up.
"""
if key is _null_key:
raise ValueError('cannot use _null_key as a key')
node = self._cache.get(key, None)
value_len = self._compute_size(value)
if value_len >= self._after_cleanup_size:
# The new value is 'too big to fit', as it would fill up/overflow
# the cache all by itself
if node is not None:
# We won't be replacing the old node, so just remove it
self._remove_node(node)
if cleanup is not None:
cleanup(key, value)
return
if node is None:
node = _LRUNode(key, value, cleanup=cleanup)
self._cache[key] = node
else:
self._value_size -= node.size
node.size = value_len
self._value_size += value_len
self._record_access(node)
if self._value_size > self._max_size:
# Time to cleanup
self.cleanup()
def cleanup(self):
"""Clear the cache until it shrinks to the requested size.
This does not completely wipe the cache, just makes sure it is under
the after_cleanup_size.
"""
# Make sure the cache is shrunk to the correct size
while self._value_size > self._after_cleanup_size:
self._remove_lru()
def _remove_node(self, node):
self._value_size -= node.size
LRUCache._remove_node(self, node)
def resize(self, max_size, after_cleanup_size=None):
"""Change the number of bytes that will be cached."""
self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
max_cache = max(int(max_size/512), 1)
self._update_max_cache(max_cache)
def _update_max_size(self, max_size, after_cleanup_size=None):
self._max_size = max_size
if after_cleanup_size is None:
self._after_cleanup_size = self._max_size * 8 // 10
else:
self._after_cleanup_size = min(after_cleanup_size, self._max_size)
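# Usage sketch (illustrative only, not part of dulwich):
#   cache = LRUCache(max_cache=2, after_cleanup_count=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3                 # exceeds max_cache; evicts 'a' (the LRU)
#   assert 'a' not in cache and cache['b'] == 2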
|
c936fe95eadc92c167dbaf5f67dbd2ec96112289
|
e22e737d44349a7b976952cc32684b3088b2eabe
|
/extractive/GraphicalModel/graphicalModel.py
|
10758e4f3ee3800db4226cf8415220fa4c44ba6f
|
[] |
no_license
|
Law-AI/summarization
|
7d70301ecdc9527fc6da2885c2e448a5b3c3485e
|
77edff66005cca4c897e608db6d47509c9e8d029
|
refs/heads/aacl
| 2022-11-25T18:08:49.327287
| 2022-11-09T07:15:46
| 2022-11-09T07:15:46
| 162,098,522
| 124
| 51
| null | 2022-10-10T18:21:15
| 2018-12-17T08:29:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,890
|
py
|
graphicalModel.py
|
from rouge import Rouge
import os
import re
import html2text
import nltk
from collections import OrderedDict
import operator
import pycrfsuite
import sys
import time
import crf_train
import crf_test
import k_mix_model_test
FULL_TEXT = '../../FullText_html/'
MANUAL_SUM = '../../CaseAnalysis/'
MAX_LENGTH_SUMMARY = 100 # Define maximum words in summary
# MAX_PERCENT_SUMMARY = 34 # 10 # 10 in paper, 34 as discussed
SUMMARY_PERCENT = 34
REACHED_FILE = ''
# 41 min running
##
## Usage: python rouge_crf.py
## Additionally: python rouge_crf.py from_year to_year reached_file
##
## Goes up to, but not including, to_year; skips files alphabetically smaller than reached_file
##
def get_summary(file):
'''
Combine crf predictions with k-mix-model
'''
text, indices = crf_test.parse_html(file)
# we have list of sentences and indices, without para information
doc_length = sum([len(line.split(' ')) for line in text])
tagger = pycrfsuite.Tagger()
tagger.open('crf_alltrain.model')
text, X_test, Y_pred = crf_test.test_crf(file, tagger)
kmm = k_mix_model_test.KMM(file)
# kmm contains score for each line in text, in serialized order
kmix_sorted = sorted(kmm.items(), key=operator.itemgetter(1),reverse=True)
# generate_summary
visited = {}
summary = {}
for pair in kmix_sorted:
sentence_id = pair[0]
label = Y_pred[sentence_id-1][0]
if label not in visited:
summary[text[sentence_id-1]] = label
visited[label] = 1
elif visited[label] == 2:
continue
else:
visited[label] = 2
summary[text[sentence_id-1]] = label
length = sum([len(key.split(' ')) for key in summary.keys()])
# print(length, SUMMARY_PERCENT * 0.01 * doc_length)
if length > SUMMARY_PERCENT * 0.01 * doc_length:
break
summary_txt = ''
order = ['F', 'I', 'A', 'LR', 'SS', 'SP', 'SO', 'R']
for category in order:
summary_txt += ''.join([key for key in summary if summary[key]==category]) + '\n'
# print(summary_txt)
return summary_txt
def get_manual_summary(file):
'''
Get manual summary from case analysis
'''
# rewriting parse_html, as format is different
with open(file, 'r') as f:
txt = f.read()
# str.replace treats the pattern literally; a regex substitution is needed here
txt = re.sub(r'</?(?!(?:p class=indent)\b)[a-z](?:[^>"\']|"[^"]*"|\'[^\']*\')*>', '', txt)
t = html2text.html2text(txt)
tokenized = nltk.tokenize.sent_tokenize(t)
t2 = []
for each in tokenized:
lines = list( filter( None, each.split('\n') ) )
t2.extend(lines)
tokenized = t2
start = 0
# Real summary is the first occurrence of "Summary"
while start < len(tokenized) and 'summary' not in tokenized[start].lower():
start = start + 1
# 2010_U_113.html has no summary
if start == len(tokenized):
return -1
text = []
closing = ['appellate history', 'thomson reuters south asia private limited', 'all cases cited',
'cases citing this case', 'legislation cited']
summary_str = ' **Summary:** '
text.append(tokenized[start].replace('\n', ' ')[len(summary_str):])
for i in range(start+1,len(tokenized)):
if any(closing_phrase in tokenized[i].lower() for closing_phrase in closing) :
break
text.append(tokenized[i].replace('\n',' '))
# just in case some closing phrase is missed
# if('thomson reuters south asia private limited' in tokenized[i].lower()):
# print('verify ', file)
summary = ' '.join(text)
return summary
def summary_looper(from_year=2010, to_year=2019, reached_file=''):
'''
Loop over all files from 2010-2018, generate summaries and compute rouge scores
'''
rouge = Rouge()
scores = OrderedDict()
summaries = {}
REACHED_FILE = reached_file
i = sum([ len( os.listdir( FULL_TEXT + str(year) ) ) for year in range(2010, from_year)]) + 1
i0 = i-1
# length is total number of documents
length = sum([ len( os.listdir( FULL_TEXT + str(year) ) ) for year in range(2010, 2019)])
for year in range(from_year, to_year):
l = sum([len( os.listdir( FULL_TEXT + str(y) ) ) for y in range(from_year, to_year)])
print('\n\n <=== ', year, ' / ', l, '===>\n\n')
for case in sorted(os.listdir(FULL_TEXT + str(year))):
print(i, ' / ', length, '=>', case, end='\r')
if REACHED_FILE != '' and case <= REACHED_FILE:
i += 1
continue
i2 = i - i0
manual_summary = get_manual_summary(MANUAL_SUM + str(year) + '/' + case)
if manual_summary == -1:
print('+- Score ', i2, ' / ', l, ' ', i, ' / ', length, ' ', case, ' => Skipped')
i += 1
continue
start = time.time()
system_generated_summary = get_summary(FULL_TEXT + str(year) + '/' + case)
running_time = time.time() - start
print('Summary generated in ', running_time)
# available are rouge-1, rouge-2, rouge-l
scores[case] = rouge.get_scores(system_generated_summary, manual_summary)[0]
summaries[case] = system_generated_summary
print('+- Score ', i2, ' / ', l, ' ', i, ' / ', length, ' ', case, ' => ', scores[case])
i += 1
# print('\n\n-----------------------------------------')
# print('Average score for ', len(scores), ' documents')
# print('Fscore: ', sum([scores[case]['f'] for case in scores])) / len(scores)
# print('Precision: ', sum([scores[case]['p'] for case in scores])) / len(scores)
# print('Recall: ', sum([scores[case]['r'] for case in scores])) / len(scores)
# Not doing top 10, instead handpicked cases
# print('\n Top 10 documents by recall:')
# metric = {case: scores[case]['r'] for case in scores}
# sorted_r = sorted(metric.items(), key=operator.itemgetter(1), reverse=True)
# # printing top 10 rouge scores
# for i in range(10):
# case = sorted_r[i][0]
# print('# ',i+1, ': ', case, ' => ', sorted_r[i][1])
# print(summaries[case], '\n')
if __name__ == '__main__':
from_year, to_year, reached_file = 2010, 2019, ''
if len(sys.argv) > 2:
from_year, to_year = int(sys.argv[1]), int(sys.argv[2])
if len(sys.argv) > 3:
reached_file = sys.argv[3]
summary_looper(from_year, to_year, reached_file)
|