language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/source.py | {
"start": 1744,
"end": 14837
} | class ____(AbstractSource):
# Skip exceptions on missing streams
raise_exception_on_missing_stream = True
def _validate_and_transform(self, config: Mapping[str, Any]):
config.setdefault("action_breakdowns_allow_empty", False)
if config.get("end_date") == "":
config.pop("end_date")
config = ConnectorConfig.parse_obj(config)
default_ads_insights_action_breakdowns = (
config.default_ads_insights_action_breakdowns
if config.default_ads_insights_action_breakdowns is not None
else AdsInsights.action_breakdowns
)
config.default_ads_insights_action_breakdowns = default_ads_insights_action_breakdowns
if config.start_date:
config.start_date = AirbyteDateTime.from_datetime(config.start_date)
if config.end_date:
config.end_date = AirbyteDateTime.from_datetime(config.end_date)
config.account_ids = list(config.account_ids)
return config
def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
"""Connection check to validate that the user-provided config can be used to connect to the underlying API
:param logger: source logger
:param config: the user-input config object conforming to the connector's spec.json
:return Tuple[bool, Any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
try:
config = self._validate_and_transform(config)
if config.end_date > ab_datetime_now():
return False, "Date range can not be in the future."
if config.start_date and config.end_date < config.start_date:
return False, "End date must be equal or after start date."
if config.credentials is not None:
api = API(access_token=config.credentials.access_token, page_size=config.page_size)
else:
api = API(access_token=config.access_token, page_size=config.page_size)
for account_id in config.account_ids:
# Get Ad Account to check creds
logger.info(f"Attempting to retrieve information for account with ID: {account_id}")
ad_account = api.get_account(account_id=account_id)
logger.info(f"Successfully retrieved account information for account: {ad_account}")
# make sure that we have valid combination of "action_breakdowns" and "breakdowns" parameters
for stream in self.get_custom_insights_streams(api, config):
stream.check_breakdowns(account_id=account_id)
except facebook_business.exceptions.FacebookRequestError as e:
return False, e._api_error_message
except AirbyteTracedException as e:
return False, f"{e.message}. Full error: {e.internal_message}"
except Exception as e:
return False, f"Unexpected error: {repr(e)}"
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Type[Stream]]:
"""Discovery method, returns available streams
:param config: A Mapping of the user input configuration as defined in the connector spec.
:return: list of the stream instances
"""
config = self._validate_and_transform(config)
if config.start_date:
config.start_date = validate_start_date(config.start_date)
config.end_date = validate_end_date(config.start_date, config.end_date)
if config.credentials is not None:
api = API(access_token=config.credentials.access_token, page_size=config.page_size)
else:
api = API(access_token=config.access_token, page_size=config.page_size)
# if start_date not specified then set default start_date for report streams to 2 years ago
report_start_date = config.start_date or (ab_datetime_now() - timedelta(days=365 * 2))
insights_args = dict(
api=api,
account_ids=config.account_ids,
start_date=report_start_date,
end_date=config.end_date,
insights_lookback_window=config.insights_lookback_window,
insights_job_timeout=config.insights_job_timeout,
filter_statuses=[status.value for status in [*ValidAdStatuses]],
)
streams = [
AdAccount(api=api, account_ids=config.account_ids),
AdSets(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
filter_statuses=config.adset_statuses,
page_size=config.page_size,
),
Ads(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
filter_statuses=config.ad_statuses,
page_size=config.page_size,
),
AdCreatives(
api=api,
account_ids=config.account_ids,
fetch_thumbnail_images=config.fetch_thumbnail_images,
page_size=config.page_size,
),
AdsInsights(
page_size=config.page_size,
action_breakdowns=config.default_ads_insights_action_breakdowns,
# in case user input is an empty list of action_breakdowns we allow empty breakdowns
action_breakdowns_allow_empty=config.default_ads_insights_action_breakdowns == [],
**insights_args,
),
AdsInsightsAgeAndGender(page_size=config.page_size, **insights_args),
AdsInsightsCountry(page_size=config.page_size, **insights_args),
AdsInsightsRegion(page_size=config.page_size, **insights_args),
AdsInsightsDma(page_size=config.page_size, **insights_args),
AdsInsightsPlatformAndDevice(page_size=config.page_size, **insights_args),
AdsInsightsActionType(page_size=config.page_size, **insights_args),
AdsInsightsActionCarouselCard(page_size=config.page_size, **insights_args),
AdsInsightsActionConversionDevice(page_size=config.page_size, **insights_args),
AdsInsightsActionProductID(page_size=config.page_size, **insights_args),
AdsInsightsActionReaction(page_size=config.page_size, **insights_args),
AdsInsightsActionVideoSound(page_size=config.page_size, **insights_args),
AdsInsightsActionVideoType(page_size=config.page_size, **insights_args),
AdsInsightsDeliveryDevice(page_size=config.page_size, **insights_args),
AdsInsightsDeliveryPlatform(page_size=config.page_size, **insights_args),
AdsInsightsDeliveryPlatformAndDevicePlatform(page_size=config.page_size, **insights_args),
AdsInsightsDemographicsAge(page_size=config.page_size, **insights_args),
AdsInsightsDemographicsCountry(page_size=config.page_size, **insights_args),
AdsInsightsDemographicsDMARegion(page_size=config.page_size, **insights_args),
AdsInsightsDemographicsGender(page_size=config.page_size, **insights_args),
Campaigns(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
filter_statuses=config.campaign_statuses,
page_size=config.page_size,
),
CustomConversions(
api=api,
account_ids=config.account_ids,
page_size=config.page_size,
),
CustomAudiences(
api=api,
account_ids=config.account_ids,
page_size=config.page_size,
),
Images(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
page_size=config.page_size,
),
Videos(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
page_size=config.page_size,
),
Activities(
api=api,
account_ids=config.account_ids,
start_date=config.start_date,
end_date=config.end_date,
page_size=config.page_size,
),
]
return streams + self.get_custom_insights_streams(api, config)
def spec(self, *args, **kwargs) -> ConnectorSpecification:
"""Returns the spec for this integration.
The spec is a JSON-Schema object describing the required configurations
(e.g: username and password) required to run this integration.
"""
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/sources/facebook-marketing",
changelogUrl="https://docs.airbyte.com/integrations/sources/facebook-marketing",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.append],
connectionSpecification=ConnectorConfig.schema(),
advanced_auth=AdvancedAuth(
auth_flow_type=AuthFlowType.oauth2_0,
predicate_key=["credentials", "auth_type"],
predicate_value="Client",
oauth_config_specification=OAuthConfigSpecification(
complete_oauth_output_specification={
"type": "object",
"properties": {
"access_token": {
"type": "string",
"path_in_connector_config": ["credentials", "access_token"],
},
},
},
complete_oauth_server_input_specification={
"type": "object",
"properties": {
"client_id": {"type": "string"},
"client_secret": {"type": "string"},
},
},
complete_oauth_server_output_specification={
"type": "object",
"additionalProperties": True,
"properties": {
"client_id": {
"type": "string",
"path_in_connector_config": ["credentials", "client_id"],
},
"client_secret": {
"type": "string",
"path_in_connector_config": ["credentials", "client_secret"],
},
},
},
),
),
)
def get_custom_insights_streams(self, api: API, config: ConnectorConfig) -> List[Type[Stream]]:
"""return custom insights streams"""
streams = []
for insight in config.custom_insights or []:
insight_fields = set(insight.fields)
if insight_fields.intersection(UNSUPPORTED_FIELDS):
# https://github.com/airbytehq/oncall/issues/1137
message = (
f"The custom fields `{insight_fields.intersection(UNSUPPORTED_FIELDS)}` are not a valid configuration for"
f" `{insight.name}'. Review Facebook Marketing's docs https://developers.facebook.com/docs/marketing-api/reference/ads-action-stats/ for valid breakdowns."
)
raise AirbyteTracedException(
message=message,
failure_type=FailureType.config_error,
)
stream = AdsInsights(
api=api,
account_ids=config.account_ids,
name=f"Custom{insight.name}",
fields=list(insight_fields),
breakdowns=list(set(insight.breakdowns)),
action_breakdowns=list(set(insight.action_breakdowns)),
action_breakdowns_allow_empty=config.action_breakdowns_allow_empty,
time_increment=insight.time_increment,
start_date=insight.start_date or config.start_date or (ab_datetime_now() - timedelta(days=365 * 2)),
end_date=insight.end_date or config.end_date,
insights_lookback_window=insight.insights_lookback_window or config.insights_lookback_window,
insights_job_timeout=insight.insights_job_timeout or config.insights_job_timeout,
level=insight.level,
)
streams.append(stream)
return streams
| SourceFacebookMarketing |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 199391,
"end": 227845
} | class ____:
"""Tests 2-samples with K-S various sizes, alternatives, modes."""
def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob,
mode='auto'):
result = stats.ks_2samp(x1, x2, alternative, mode=mode)
expected = np.array([expected_statistic, expected_prob])
assert_array_almost_equal(np.array(result), expected)
def testSmall(self):
self._testOne([0], [1], 'two-sided', 1.0/1, 1.0)
self._testOne([0], [1], 'greater', 1.0/1, 0.5)
self._testOne([0], [1], 'less', 0.0/1, 1.0)
self._testOne([1], [0], 'two-sided', 1.0/1, 1.0)
self._testOne([1], [0], 'greater', 0.0/1, 1.0)
self._testOne([1], [0], 'less', 1.0/1, 0.5)
def testTwoVsThree(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0)
self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7)
self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7)
self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6)
self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3)
self._testOne(data1m, data2, 'less', 0, 1.0)
def testTwoVsFour(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0, 4.0])
self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15)
self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15)
self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15)
self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15)
self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15)
self._testOne(data1m, data2, 'less', 0, 1.0)
def test100_100(self):
x100 = np.linspace(1, 100, 100)
x100_2_p1 = x100 + 2 + 0.1
x100_2_m1 = x100 + 2 - 0.1
self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055)
self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248)
self._testOne(x100, x100_2_p1, 'less', 0, 1.0)
self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0)
self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184)
self._testOne(x100, x100_2_m1, 'less', 0, 1.0)
def test100_110(self):
x100 = np.linspace(1, 100, 100)
x110 = np.linspace(1, 100, 110)
x110_20_p1 = x110 + 20 + 0.1
x110_20_m1 = x110 + 20 - 0.1
# 100, 110
self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353)
self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203)
self._testOne(x100, x110_20_p1, 'less', 0, 1)
self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313)
self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056)
self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0)
def testRepeatedValues(self):
x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int)
x3344 = x2233 + 1
x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int)
x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int)
self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952)
self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786)
self._testOne(x2233, x3344, 'less', 0.0/16, 1.0)
self._testOne(x2356, x3467, 'two-sided', 190.0/21/26, 0.0919245790168125)
self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544)
self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775)
def testEqualSizes(self):
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+1, 'less', 0.0/3, 1.)
self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.)
self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0)
self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75)
@pytest.mark.slow
def testMiddlingBoth(self):
# 500, 600
n1, n2 = 500, 600
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0,
mode='auto')
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0,
mode='asymp')
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929,
mode='asymp')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021,
mode='asymp')
with warnings.catch_warnings():
message = "ks_2samp: Exact calculation unsuccessful."
warnings.filterwarnings("ignore", message, RuntimeWarning)
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929,
mode='exact')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021,
mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021,
mode='exact')
_check_warnings(w, RuntimeWarning, 1)
@pytest.mark.slow
def testMediumBoth(self):
# 1000, 1100
n1, n2 = 1000, 1100
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0,
mode='asymp')
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0,
mode='auto')
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622,
mode='asymp')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984,
mode='asymp')
with warnings.catch_warnings():
message = "ks_2samp: Exact calculation unsuccessful."
warnings.filterwarnings("ignore", message, RuntimeWarning)
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622,
mode='exact')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984,
mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984,
mode='exact')
_check_warnings(w, RuntimeWarning, 1)
def testLarge(self):
# 10000, 110
n1, n2 = 10000, 110
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 100, n2)
self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15)
self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591)
self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26)
def test_gh11184(self):
# 3000, 3001, exact two-sided
rng = np.random.RandomState(123456)
x = rng.normal(size=3000)
y = rng.normal(size=3001) * 1.5
self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15,
mode='asymp')
self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15,
mode='exact')
@pytest.mark.xslow
def test_gh11184_bigger(self):
# 10000, 10001, exact two-sided
rng = np.random.RandomState(123456)
x = rng.normal(size=10000)
y = rng.normal(size=10001) * 1.5
self._testOne(x, y, 'two-sided', 0.10597913208679133, 3.3149311398483503e-49,
mode='asymp')
self._testOne(x, y, 'two-sided', 0.10597913208679133, 2.7755575615628914e-15,
mode='exact')
self._testOne(x, y, 'greater', 0.10597913208679133, 2.7947433906389253e-41,
mode='asymp')
self._testOne(x, y, 'less', 0.09658002199780022, 2.7947433906389253e-41,
mode='asymp')
@pytest.mark.xslow
def test_gh12999(self):
rng = np.random.RandomState(123456)
for x in range(1000, 12000, 1000):
vals1 = rng.normal(size=(x))
vals2 = rng.normal(size=(x + 10), loc=0.5)
exact = stats.ks_2samp(vals1, vals2, mode='exact').pvalue
asymp = stats.ks_2samp(vals1, vals2, mode='asymp').pvalue
# these two p-values should be in line with each other
assert_array_less(exact, 3 * asymp)
assert_array_less(asymp, 3 * exact)
@pytest.mark.slow
def testLargeBoth(self):
# 10000, 11000
n1, n2 = 10000, 11000
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576,
mode='asymp')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990456491488628,
mode='exact')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576,
mode='auto')
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673)
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724)
with warnings.catch_warnings():
message = "ks_2samp: Exact calculation unsuccessful."
warnings.filterwarnings("ignore", message, RuntimeWarning)
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673,
mode='exact')
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724,
mode='exact')
def testNamedAttributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ks_2samp([1, 2], [3])
check_named_results(res, attributes)
@pytest.mark.slow
def test_some_code_paths(self):
# Check that some code paths are executed
from scipy.stats._stats_py import (
_count_paths_outside_method,
_compute_outer_prob_inside_method
)
_compute_outer_prob_inside_method(1, 1, 1, 1)
_count_paths_outside_method(1000, 1, 1, 1001)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, _count_paths_outside_method,
1100, 1099, 1, 1)
assert_raises(FloatingPointError, _count_paths_outside_method,
2000, 1000, 1, 1)
@pytest.mark.parametrize('case', (([], [1]), ([1], []), ([], [])))
def test_argument_checking(self, case):
# Check that an empty array warns
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
res = stats.ks_2samp(*case)
assert_equal(res.statistic, np.nan)
assert_equal(res.pvalue, np.nan)
@pytest.mark.xslow
def test_gh12218(self):
"""Ensure gh-12218 is fixed."""
# gh-1228 triggered a TypeError calculating sqrt(n1*n2*(n1+n2)).
# n1, n2 both large integers, the product exceeded 2^64
rng = np.random.default_rng(8751495592)
n1 = 2097152 # 2*^21
rvs1 = stats.uniform.rvs(size=n1, loc=0., scale=1, random_state=rng)
rvs2 = rvs1 + 1 # Exact value of rvs2 doesn't matter.
stats.ks_2samp(rvs1, rvs2, alternative='greater', mode='asymp')
stats.ks_2samp(rvs1, rvs2, alternative='less', mode='asymp')
stats.ks_2samp(rvs1, rvs2, alternative='two-sided', mode='asymp')
def test_warnings_gh_14019(self):
# Check that RuntimeWarning is raised when method='auto' and exact
# p-value calculation fails. See gh-14019.
rng = np.random.RandomState(seed=23493549)
# random samples of the same size as in the issue
data1 = rng.random(size=881) + 0.5
data2 = rng.random(size=369)
message = "ks_2samp: Exact calculation unsuccessful"
with pytest.warns(RuntimeWarning, match=message):
res = stats.ks_2samp(data1, data2, alternative='less')
assert_allclose(res.pvalue, 0, atol=1e-14)
@pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_2samp])
@pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
[('greater', 5.9, 5.9, +1),
('less', 6.1, 6.0, -1),
('two-sided', 5.9, 5.9, +1),
('two-sided', 6.1, 6.0, -1)])
def test_location_sign(self, ksfunc, alternative,
x6val, ref_location, ref_sign):
# Test that location and sign corresponding with statistic are as
# expected. (Test is designed to be easy to predict.)
x = np.arange(10, dtype=np.float64)
y = x.copy()
x[6] = x6val
res = stats.ks_2samp(x, y, alternative=alternative)
assert res.statistic == 0.1
assert res.statistic_location == ref_location
assert res.statistic_sign == ref_sign
def test_ttest_rel():
# regression test
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
# test scalars
with warnings.catch_warnings(), \
np.errstate(invalid="ignore", divide="ignore"):
warnings.filterwarnings(
"ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)
t, p = stats.ttest_rel(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_rel(rvs1, rvs2, axis=0)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t, p = stats.ttest_rel(np.moveaxis(rvs1_3D, 2, 0),
np.moveaxis(rvs2_3D, 2, 0),
axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# test alternative parameter
assert_raises(ValueError, stats.ttest_rel, rvs1, rvs2, alternative="error")
t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="less")
assert_allclose(p, 1 - pr/2)
assert_allclose(t, tr)
t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="greater")
assert_allclose(p, pr/2)
assert_allclose(t, tr)
# check nan policy
rng = np.random.RandomState(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
x[500] = np.nan
y = (stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) +
stats.norm.rvs(scale=0.2, size=501, random_state=rng))
y[500] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
(0.25299925303978066, 0.8003729814201519))
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
# test zero division problem
with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
# test incorrect input shape raise an error
x = np.arange(24)
assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
x.reshape((2, 3, 4)))
# Convert from two-sided p-values to one sided using T result data.
def convert(t, p, alt):
if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
return p / 2
return 1 - (p / 2)
converter = np.vectorize(convert)
rvs1_2D[:, 20:30] = np.nan
rvs2_2D[:, 15:25] = np.nan
with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
tr, pr = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit')
with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0,
nan_policy='omit', alternative='less')
assert_allclose(t, tr, rtol=1e-14)
with np.errstate(invalid='ignore'):
assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0,
nan_policy='omit', alternative='greater')
assert_allclose(t, tr, rtol=1e-14)
with np.errstate(invalid='ignore'):
assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
def test_ttest_rel_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_rel(x, y, nan_policy='omit')
r2 = stats.ttest_rel(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are paired when NaNs are dropped
r3 = stats.ttest_rel(y[1:], x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, paired=TRUE)
assert_allclose(r2, (-2, 0.1835), atol=1e-4)
def test_ttest_rel_empty_1d_returns_nan():
# Two empty inputs should return a TtestResult containing nan
# for both values.
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
result = stats.ttest_rel([], [])
assert isinstance(result, stats._stats_py.TtestResult)
assert_equal(result, (np.nan, np.nan))
@pytest.mark.parametrize('b, expected_shape',
[(np.empty((1, 5, 0)), (3, 5)),
(np.empty((1, 0, 0)), (3, 0))])
def test_ttest_rel_axis_size_zero(b, expected_shape):
# In this test, the length of the axis dimension is zero.
# The results should be arrays containing nan with shape
# given by the broadcast nonaxis dimensions.
a = np.empty((3, 1, 0))
with warnings.catch_warnings():
# first case should warn, second shouldn't?
warnings.filterwarnings("ignore", too_small_nd_not_omit, SmallSampleWarning)
result = stats.ttest_rel(a, b, axis=-1)
assert isinstance(result, stats._stats_py.TtestResult)
expected_value = np.full(expected_shape, fill_value=np.nan)
assert_equal(result.statistic, expected_value)
assert_equal(result.pvalue, expected_value)
def test_ttest_rel_nonaxis_size_zero():
# In this test, the length of the axis dimension is nonzero,
# but one of the nonaxis dimensions has length 0. Check that
# we still get the correctly broadcast shape, which is (5, 0)
# in this case.
a = np.empty((1, 8, 0))
b = np.empty((5, 8, 1))
result = stats.ttest_rel(a, b, axis=1)
assert isinstance(result, stats._stats_py.TtestResult)
assert_equal(result.statistic.shape, (5, 0))
assert_equal(result.pvalue.shape, (5, 0))
@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
def test_ttest_rel_ci_1d(alternative):
# test confidence interval method against reference values
rng = np.random.default_rng(3749065329432213059)
n = 10
x = rng.normal(size=n, loc=1.5, scale=2)
y = rng.normal(size=n, loc=2, scale=2)
# Reference values generated with R t.test:
# options(digits=16)
# x = c(1.22825792, 1.63950485, 4.39025641, 0.68609437, 2.03813481,
# -1.20040109, 1.81997937, 1.86854636, 2.94694282, 3.94291373)
# y = c(3.49961496, 1.53192536, 5.53620083, 2.91687718, 0.04858043,
# 3.78505943, 3.3077496 , 2.30468892, 3.42168074, 0.56797592)
# t.test(x, y, paired=TRUE, conf.level=0.85, alternative='l')
ref = {'two-sided': [-1.912194489914035, 0.400169725914035],
'greater': [-1.563944820311475, np.inf],
'less': [-np.inf, 0.05192005631147523]}
res = stats.ttest_rel(x, y, alternative=alternative)
ci = res.confidence_interval(confidence_level=0.85)
assert_allclose(ci, ref[alternative])
assert_equal(res.df, n-1)
@pytest.mark.parametrize("test_fun, args",
[(stats.ttest_1samp, (np.arange(10), 0)),
(stats.ttest_rel, (np.arange(10), np.arange(10)))])
def test_ttest_ci_iv(test_fun, args):
# test `confidence_interval` method input validation
res = test_fun(*args)
message = '`confidence_level` must be a number between 0 and 1.'
with pytest.raises(ValueError, match=message):
res.confidence_interval(confidence_level=10)
def _desc_stats(x1, x2, axis=0, *, xp=None):
xp = array_namespace(x1, x2) if xp is None else xp
def _stats(x, axis=0):
x = xp.asarray(x)
mu = xp.mean(x, axis=axis)
std = xp.std(x, axis=axis, correction=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
@make_xp_test_case(stats.ttest_ind, stats.ttest_ind_from_stats)
def test_ttest_ind(xp):
# regression test
tr = xp.asarray(1.0912746897927283)
pr = xp.asarray(0.27647818616351882)
tr_2D = xp.stack([tr, -tr])
pr_2D = xp.stack([pr, pr])
rvs1 = xp.linspace(5, 105, 100)
rvs2 = xp.linspace(1, 100, 100)
rvs1_2D = xp.stack([rvs1, rvs2])
rvs2_2D = xp.stack([rvs2, rvs1])
res = stats.ttest_ind(rvs1, rvs2, axis=0)
t, p = res # check that result object can be unpacked
xp_assert_close(t, tr)
xp_assert_close(p, pr)
res = stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs2))
t, p = res # check that result object can be unpacked
xp_assert_close(t, tr)
xp_assert_close(p, pr)
res = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
xp_assert_close(res.statistic, tr_2D)
xp_assert_close(res.pvalue, pr_2D)
res = stats.ttest_ind_from_stats(*_desc_stats(rvs1_2D.T, rvs2_2D.T))
xp_assert_close(res.statistic, tr_2D)
xp_assert_close(res.pvalue, pr_2D)
res = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
xp_assert_close(res.statistic, tr_2D)
xp_assert_close(res.pvalue, pr_2D)
res = stats.ttest_ind_from_stats(*_desc_stats(rvs1_2D, rvs2_2D, axis=1))
xp_assert_close(res.statistic, tr_2D)
xp_assert_close(res.pvalue, pr_2D)
# test on 3 dimensions removed because generic tests in
# test_axis_nan_policy are much stronger
# test alternative parameter
message = "`alternative` must be 'less', 'greater', or 'two-sided'."
with pytest.raises(ValueError, match=message):
stats.ttest_ind(rvs1, rvs2, alternative = "error")
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
with pytest.raises(ValueError, match=message):
stats.ttest_ind_from_stats(*args, alternative = "error")
t, p = stats.ttest_ind(rvs1, rvs2, alternative="less")
xp_assert_close(p, 1 - (pr/2))
xp_assert_close(t, tr)
t, p = stats.ttest_ind(rvs1, rvs2, alternative="greater")
xp_assert_close(p, pr/2)
xp_assert_close(t, tr)
# Check that ttest_ind_from_stats agrees with ttest_ind
res1 = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="less")
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
res2 = stats.ttest_ind_from_stats(*args, alternative="less")
xp_assert_close(res1.statistic, res2.statistic)
xp_assert_close(res1.pvalue, res2.pvalue)
res1 = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="less")
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
res2 = stats.ttest_ind_from_stats(*args, alternative="less")
xp_assert_close(res1.statistic, res2.statistic)
xp_assert_close(res1.pvalue, res2.pvalue)
# test NaNs
NaN = xp.asarray(xp.nan)
rvs1 = xp.where(xp.arange(rvs1.shape[0]) == 0, NaN, rvs1)
res = stats.ttest_ind(rvs1, rvs2, axis=0)
xp_assert_equal(res.statistic, NaN)
xp_assert_equal(res.pvalue, NaN)
res = stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs2))
xp_assert_equal(res.statistic, NaN)
xp_assert_equal(res.pvalue, NaN)
def test_ttest_ind_nan_policy():
rvs1 = np.linspace(5, 105, 100)
rvs2 = np.linspace(1, 100, 100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
rvs1_3D = np.dstack([rvs1_2D, rvs1_2D, rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D, rvs2_2D, rvs2_2D])
# check nan policy
rng = np.random.RandomState(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
x[500] = np.nan
y = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
(0.24779670949091914, 0.80434267337517906))
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
# test zero division problem
with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
# check that nan in input array result in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
rvs1_3D[:, :, 10:15] = np.nan
rvs2_3D[:, :, 6:12] = np.nan
# Convert from two-sided p-values to one sided using T result data.
def convert(t, p, alt):
if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
return p / 2
return 1 - (p / 2)
converter = np.vectorize(convert)
tr, pr = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=0, nan_policy='omit')
t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=0, nan_policy='omit',
alternative='less')
assert_allclose(t, tr, rtol=1e-14)
assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=0, nan_policy='omit',
alternative='greater')
assert_allclose(t, tr, rtol=1e-14)
assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
def test_ttest_ind_scalar():
# test scalars
with warnings.catch_warnings(), np.errstate(invalid="ignore"):
warnings.filterwarnings(
"ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)
t, p = stats.ttest_ind(4., 3.)
assert np.isnan(t)
assert np.isnan(p)
@pytest.mark.filterwarnings("ignore:Arguments...:DeprecationWarning")
| TestKSTwoSamples |
python | encode__django-rest-framework | tests/models.py | {
"start": 2545,
"end": 2906
} | class ____(RESTFrameworkModel):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, null=True, blank=True,
related_name='nullable_sources',
verbose_name='Optional target object',
on_delete=models.CASCADE)
| NullableForeignKeySource |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 16102,
"end": 16681
} | class ____:
"""Test bg_BG color provider methods"""
def test_color_name(self, faker, num_samples):
for _ in range(num_samples):
color_name = faker.color_name()
assert isinstance(color_name, str)
assert color_name in BgBgColorProvider.all_colors.keys()
def test_safe_color_name(self, faker, num_samples):
for _ in range(num_samples):
safe_color_name = faker.safe_color_name()
assert isinstance(safe_color_name, str)
assert safe_color_name in BgBgColorProvider.safe_colors
| TestBgBg |
python | falconry__falcon | tests/test_request_media.py | {
"start": 1619,
"end": 6341
} | class ____:
def __init__(self, expected_error):
self._expected_error = expected_error
async def on_post(self, req, resp, **kwargs):
with pytest.raises(self._expected_error) as error:
await req.get_media()
self.captured_error = error
@pytest.mark.parametrize(
'media_type',
[
(None),
('*/*'),
('application/json'),
('application/json; charset=utf-8'),
],
)
def test_json(client, media_type):
expected_body = b'{"something": true}'
headers = {'Content-Type': media_type}
client.simulate_post('/', body=expected_body, headers=headers)
media = client.resource.captured_req_media
assert media is not None
assert media.get('something') is True
@pytest.mark.parametrize(
'media_type',
[
('application/msgpack'),
('application/msgpack; charset=utf-8'),
('application/x-msgpack'),
],
)
def test_msgpack(asgi, media_type, msgpack):
client = create_client(
asgi,
{
'application/msgpack': media.MessagePackHandler(),
'application/x-msgpack': media.MessagePackHandler(),
},
)
headers = {'Content-Type': media_type}
# Bytes
expected_body = b'\x81\xc4\tsomething\xc3'
assert (
client.simulate_post('/', body=expected_body, headers=headers).status_code
== 200
)
req_media = client.resource.captured_req_media
assert req_media.get(b'something') is True
# Unicode
expected_body = b'\x81\xa9something\xc3'
assert (
client.simulate_post('/', body=expected_body, headers=headers).status_code
== 200
)
req_media = client.resource.captured_req_media
assert req_media.get('something') is True
@pytest.mark.parametrize(
'media_type',
[
('nope/json'),
],
)
def test_unknown_media_type(asgi, media_type):
client = _create_client_invalid_media(asgi, errors.HTTPUnsupportedMediaType)
headers = {'Content-Type': media_type}
assert (
client.simulate_post('/', body=b'something', headers=headers).status_code == 200
)
title_msg = '415 Unsupported Media Type'
description_msg = f'{media_type} is an unsupported media type.'
assert client.resource.captured_error.value.title == title_msg
assert client.resource.captured_error.value.description == description_msg
@pytest.mark.parametrize('media_type', ['application/json', 'application/msgpack'])
def test_empty_body(asgi, media_type, msgpack):
client = _create_client_invalid_media(
asgi,
errors.HTTPBadRequest,
{
'application/msgpack': media.MessagePackHandler(),
'application/json': media.JSONHandler(),
},
)
headers = {'Content-Type': media_type}
assert client.simulate_post('/', headers=headers).status_code == 200
assert (
'Could not parse an empty' in client.resource.captured_error.value.description
)
assert isinstance(client.resource.captured_error.value, errors.MediaNotFoundError)
def test_invalid_json(asgi):
client = _create_client_invalid_media(asgi, errors.HTTPBadRequest)
expected_body = '{'
headers = {'Content-Type': 'application/json'}
assert (
client.simulate_post('/', body=expected_body, headers=headers).status_code
== 200
)
assert (
'Could not parse JSON body' in client.resource.captured_error.value.description
)
assert isinstance(client.resource.captured_error.value, errors.MediaMalformedError)
try:
json.loads(expected_body)
except Exception as e:
assert type(client.resource.captured_error.value.__cause__) is type(e)
assert str(client.resource.captured_error.value.__cause__) == str(e)
def test_invalid_msgpack(asgi, msgpack):
handlers = {'application/msgpack': media.MessagePackHandler()}
client = _create_client_invalid_media(
asgi, errors.HTTPBadRequest, handlers=handlers
)
expected_body = '/////////////////////'
headers = {'Content-Type': 'application/msgpack'}
assert (
client.simulate_post('/', body=expected_body, headers=headers).status_code
== 200
)
desc = 'Could not parse MessagePack body - unpack(b) received extra data.'
assert client.resource.captured_error.value.description == desc
assert isinstance(client.resource.captured_error.value, errors.MediaMalformedError)
try:
msgpack.unpackb(expected_body.encode('utf-8'))
except Exception as e:
assert type(client.resource.captured_error.value.__cause__) is type(e)
assert str(client.resource.captured_error.value.__cause__) == str(e)
| ResourceInvalidMediaAsync |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/bigquery.py | {
"start": 1608,
"end": 9801
} | class ____(BaseTrigger):
"""
BigQueryInsertJobTrigger run on the trigger worker to perform insert operation.
:param conn_id: Reference to google cloud connection id
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
:param project_id: Google Cloud Project where the job is running
:param location: The dataset location.
:param dataset_id: The dataset ID of the requested table. (templated)
:param table_id: The table ID of the requested table. (templated)
:param poll_interval: polling period in seconds to check for the status. (templated)
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account. (templated)
"""
def __init__(
self,
conn_id: str,
job_id: str | None,
project_id: str,
location: str | None,
dataset_id: str | None = None,
table_id: str | None = None,
poll_interval: float = 4.0,
impersonation_chain: str | Sequence[str] | None = None,
cancel_on_kill: bool = True,
):
super().__init__()
self.log.info("Using the connection %s .", conn_id)
self.conn_id = conn_id
self.job_id = job_id
self._job_conn = None
self.dataset_id = dataset_id
self.project_id = project_id
self.location = location
self.table_id = table_id
self.poll_interval = poll_interval
self.impersonation_chain = impersonation_chain
self.cancel_on_kill = cancel_on_kill
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize BigQueryInsertJobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger",
{
"conn_id": self.conn_id,
"job_id": self.job_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"location": self.location,
"table_id": self.table_id,
"poll_interval": self.poll_interval,
"impersonation_chain": self.impersonation_chain,
"cancel_on_kill": self.cancel_on_kill,
},
)
if not AIRFLOW_V_3_0_PLUS:
@provide_session
def get_task_instance(self, session: Session) -> TaskInstance:
query = session.query(TaskInstance).filter(
TaskInstance.dag_id == self.task_instance.dag_id,
TaskInstance.task_id == self.task_instance.task_id,
TaskInstance.run_id == self.task_instance.run_id,
TaskInstance.map_index == self.task_instance.map_index,
)
task_instance = query.one_or_none()
if task_instance is None:
raise AirflowException(
"TaskInstance with dag_id: %s, task_id: %s, run_id: %s and map_index: %s is not found",
self.task_instance.dag_id,
self.task_instance.task_id,
self.task_instance.run_id,
self.task_instance.map_index,
)
return task_instance
async def get_task_state(self):
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance
task_states_response = await sync_to_async(RuntimeTaskInstance.get_task_states)(
dag_id=self.task_instance.dag_id,
task_ids=[self.task_instance.task_id],
run_ids=[self.task_instance.run_id],
map_index=self.task_instance.map_index,
)
try:
task_state = task_states_response[self.task_instance.run_id][self.task_instance.task_id]
except Exception:
raise AirflowException(
"TaskInstance with dag_id: %s, task_id: %s, run_id: %s and map_index: %s is not found",
self.task_instance.dag_id,
self.task_instance.task_id,
self.task_instance.run_id,
self.task_instance.map_index,
)
return task_state
async def safe_to_cancel(self) -> bool:
"""
Whether it is safe to cancel the external job which is being executed by this trigger.
This is to avoid the case that `asyncio.CancelledError` is called because the trigger itself is stopped.
Because in those cases, we should NOT cancel the external job.
"""
if AIRFLOW_V_3_0_PLUS:
task_state = await self.get_task_state()
else:
# Database query is needed to get the latest state of the task instance.
task_instance = self.get_task_instance() # type: ignore[call-arg]
task_state = task_instance.state
return task_state != TaskInstanceState.DEFERRED
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Get current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
try:
while True:
job_status = await hook.get_job_status(
job_id=self.job_id, project_id=self.project_id, location=self.location
)
if job_status["status"] == "success":
self.log.info("BigQuery Job succeeded")
yield TriggerEvent(
{
"job_id": self.job_id,
"status": job_status["status"],
"message": job_status["message"],
}
)
return
elif job_status["status"] == "error":
self.log.info("BigQuery Job failed: %s", job_status)
yield TriggerEvent(
{
"status": job_status["status"],
"message": job_status["message"],
}
)
return
else:
self.log.info(
"Bigquery job status is %s. Sleeping for %s seconds.",
job_status["status"],
self.poll_interval,
)
await asyncio.sleep(self.poll_interval)
except asyncio.CancelledError:
if self.job_id and self.cancel_on_kill and await self.safe_to_cancel():
self.log.info(
"The job is safe to cancel the as airflow TaskInstance is not in deferred state."
)
self.log.info(
"Cancelling job. Project ID: %s, Location: %s, Job ID: %s",
self.project_id,
self.location,
self.job_id,
)
await hook.cancel_job(job_id=self.job_id, project_id=self.project_id, location=self.location)
else:
self.log.info(
"Trigger may have shutdown. Skipping to cancel job because the airflow "
"task is not cancelled yet: Project ID: %s, Location:%s, Job ID:%s",
self.project_id,
self.location,
self.job_id,
)
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> BigQueryAsyncHook:
return BigQueryAsyncHook(gcp_conn_id=self.conn_id, impersonation_chain=self.impersonation_chain)
| BigQueryInsertJobTrigger |
python | pyparsing__pyparsing | tests/test_simple_unit.py | {
"start": 15369,
"end": 16217
} | class ____(PyparsingExpressionTestCase):
# do not make staticmethod
# @staticmethod
def compute_stats_parse_action(t):
# by the time this parse action is called, parsed numeric words
# have been converted to ints by a previous parse action, so
# they can be treated as ints
t["sum"] = sum(t)
t["ave"] = sum(t) / len(t)
t["min"] = min(t)
t["max"] = max(t)
tests = [
PyparsingTest(
desc="A parse action that adds new key-values",
expr=pp.pyparsing_common.integer[...].add_parse_action(
compute_stats_parse_action
),
text="27 1 14 22 89",
expected_list=[27, 1, 14, 22, 89],
expected_dict={"ave": 30.6, "max": 89, "min": 1, "sum": 153},
),
]
| TestResultsModifyingParseAction |
python | sqlalchemy__sqlalchemy | test/orm/test_expire.py | {
"start": 60195,
"end": 64146
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"data",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"data_fetched",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30), FetchedValue()),
)
Table(
"data_defer",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("data2", String(30)),
)
@classmethod
def setup_classes(cls):
class Data(cls.Comparable):
pass
class DataFetched(cls.Comparable):
pass
class DataDefer(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(cls.classes.Data, cls.tables.data)
cls.mapper_registry.map_imperatively(
cls.classes.DataFetched,
cls.tables.data_fetched,
eager_defaults=False,
)
cls.mapper_registry.map_imperatively(
cls.classes.DataDefer,
cls.tables.data_defer,
properties={"data": deferred(cls.tables.data_defer.c.data)},
)
def test_attr_not_inserted(self):
Data = self.classes.Data
sess = fixture_session()
d1 = Data()
sess.add(d1)
sess.flush()
# we didn't insert a value for 'data',
# so its not in dict, but also when we hit it, it isn't
# expired because there's no column default on it or anything like that
assert "data" not in d1.__dict__
def go():
eq_(d1.data, None)
self.assert_sql_count(testing.db, go, 0)
def test_attr_not_inserted_expired(self):
Data = self.classes.Data
sess = fixture_session(autoflush=False)
d1 = Data()
sess.add(d1)
sess.flush()
assert "data" not in d1.__dict__
# with an expire, we emit
sess.expire(d1)
def go():
eq_(d1.data, None)
self.assert_sql_count(testing.db, go, 1)
def test_attr_not_inserted_fetched(self):
Data = self.classes.DataFetched
sess = fixture_session()
d1 = Data()
sess.add(d1)
sess.flush()
assert "data" not in d1.__dict__
def go():
eq_(d1.data, None)
self.assert_sql_count(testing.db, go, 1)
def test_cols_missing_in_load(self):
Data = self.classes.Data
with Session(testing.db) as sess, sess.begin():
d1 = Data(data="d1")
sess.add(d1)
sess = fixture_session()
d1 = sess.query(Data).from_statement(select(Data.id)).first()
# cols not present in the row are implicitly expired
def go():
eq_(d1.data, "d1")
self.assert_sql_count(testing.db, go, 1)
def test_deferred_cols_missing_in_load_state_reset(self):
Data = self.classes.DataDefer
with Session(testing.db) as sess, sess.begin():
d1 = Data(data="d1")
sess.add(d1)
with Session(testing.db) as sess:
d1 = (
sess.query(Data)
.from_statement(select(Data.id))
.options(undefer(Data.data))
.first()
)
d1.data = "d2"
# the deferred loader has to clear out any state
# on the col, including that 'd2' here
d1 = sess.query(Data).populate_existing().first()
def go():
eq_(d1.data, "d1")
self.assert_sql_count(testing.db, go, 1)
| LifecycleTest |
python | scrapy__scrapy | scrapy/utils/asyncio.py | {
"start": 7633,
"end": 9025
} | class ____:
"""An universal result for :func:`call_later`, wrapping either
:class:`asyncio.TimerHandle` or :class:`twisted.internet.base.DelayedCall`.
The provided API is close to the :class:`asyncio.TimerHandle` one: there is
no ``active()`` (as there is no such public API in
:class:`asyncio.TimerHandle`) but ``cancel()`` can be called on already
called or cancelled instances.
"""
_timer_handle: asyncio.TimerHandle | None = None
_delayed_call: DelayedCall | None = None
@classmethod
def from_asyncio(cls, timer_handle: asyncio.TimerHandle) -> Self:
"""Create a CallLaterResult from an asyncio TimerHandle."""
o = cls()
o._timer_handle = timer_handle
return o
@classmethod
def from_twisted(cls, delayed_call: DelayedCall) -> Self:
"""Create a CallLaterResult from a Twisted DelayedCall."""
o = cls()
o._delayed_call = delayed_call
return o
def cancel(self) -> None:
"""Cancel the underlying delayed call.
Does nothing if the delayed call was already called or cancelled.
"""
if self._timer_handle:
self._timer_handle.cancel()
self._timer_handle = None
elif self._delayed_call and self._delayed_call.active():
self._delayed_call.cancel()
self._delayed_call = None
| CallLaterResult |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 84499,
"end": 86071
} | class ____:
@pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4])
@pytest.mark.parametrize("dtype", ['f', 'd'])
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="np.frexp gives different answers for NAN/INF on windows and linux")
@pytest.mark.xfail(IS_MUSL, reason="gh23049")
def test_frexp(self, dtype, stride):
arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
out_mant = np.ones(8, dtype=dtype)
out_exp = 2 * np.ones(8, dtype='i')
mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))
assert_equal(mant_true[::stride], mant)
assert_equal(exp_true[::stride], exp)
assert_equal(out_mant[::stride], mant_true[::stride])
assert_equal(out_exp[::stride], exp_true[::stride])
# func : [maxulperror, low, high]
avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203
'absolute' : [0, -100., 100.], # noqa: E203
'reciprocal' : [1, 1., 100.], # noqa: E203
'square' : [1, -100., 100.], # noqa: E203
'rint' : [0, -100., 100.], # noqa: E203
'floor' : [0, -100., 100.], # noqa: E203
'ceil' : [0, -100., 100.], # noqa: E203
'trunc' : [0, -100., 100.]} # noqa: E203
| TestFRExp |
python | pypa__setuptools | pkg_resources/tests/test_working_set.py | {
"start": 1291,
"end": 8602
} | class ____:
def __init__(self, installable_dists) -> None:
self._installable_dists = installable_dists
def __call__(self, req):
return next(
iter(filter(lambda dist: dist in req, self._installable_dists)), None
)
def parametrize_test_working_set_resolve(*test_list):
idlist = []
argvalues = []
for test in test_list:
(
name,
installed_dists,
installable_dists,
requirements,
expected1,
expected2,
) = (
strip_comments(s.lstrip())
for s in textwrap.dedent(test).lstrip().split('\n\n', 5)
)
installed_dists = list(parse_distributions(installed_dists))
installable_dists = list(parse_distributions(installable_dists))
requirements = list(pkg_resources.parse_requirements(requirements))
for id_, replace_conflicting, expected in (
(name, False, expected1),
(name + '_replace_conflicting', True, expected2),
):
idlist.append(id_)
expected = strip_comments(expected.strip())
if re.match(r'\w+$', expected):
expected = getattr(pkg_resources, expected)
assert issubclass(expected, Exception)
else:
expected = list(parse_distributions(expected))
argvalues.append(
pytest.param(
installed_dists,
installable_dists,
requirements,
replace_conflicting,
expected,
)
)
return pytest.mark.parametrize(
(
"installed_dists",
"installable_dists",
"requirements",
"replace_conflicting",
"resolved_dists_or_exception",
),
argvalues,
ids=idlist,
)
@parametrize_test_working_set_resolve(
"""
# id
noop
# installed
# installable
# wanted
# resolved
# resolved [replace conflicting]
""",
"""
# id
already_installed
# installed
foo-3.0
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
""",
"""
# id
installable_not_installed
# installed
# installable
foo-3.0
foo-4.0
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
""",
"""
# id
not_installable
# installed
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
""",
"""
# id
no_matching_version
# installed
# installable
foo-3.1
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
""",
"""
# id
installable_with_installed_conflict
# installed
foo-3.1
# installable
foo-3.5
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
foo-3.5
""",
"""
# id
not_installable_with_installed_conflict
# installed
foo-3.1
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
""",
"""
# id
installed_with_installed_require
# installed
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
""",
"""
# id
installed_with_conflicting_installed_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
""",
"""
# id
installed_with_installable_conflicting_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-2.9
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
""",
"""
# id
installed_with_installable_require
# installed
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-3.9
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
""",
"""
# id
installable_with_installed_require
# installed
foo-3.9
# installable
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
""",
"""
# id
installable_with_installable_require
# installed
# installable
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
""",
"""
# id
installable_with_conflicting_installable_require
# installed
foo-5
# installable
foo-2.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
""",
"""
# id
conflicting_installables
# installed
# installable
foo-2.9
foo-5.0
# wanted
foo>=2.1,!=3.1,<4
foo>=4
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
""",
"""
# id
installables_with_conflicting_requires
# installed
# installable
foo-2.9
dep==1.0
baz-5.0
dep==2.0
dep-1.0
dep-2.0
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
""",
"""
# id
installables_with_conflicting_nested_requires
# installed
# installable
foo-2.9
dep1
dep1-1.0
subdep<1.0
baz-5.0
dep2
dep2-1.0
subdep>1.0
subdep-0.9
subdep-1.1
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
""",
"""
# id
wanted_normalized_name_installed_canonical
# installed
foo.bar-3.6
# installable
# wanted
foo-bar==3.6
# resolved
foo.bar-3.6
# resolved [replace conflicting]
foo.bar-3.6
""",
)
def test_working_set_resolve(
installed_dists,
installable_dists,
requirements,
replace_conflicting,
resolved_dists_or_exception,
):
ws = pkg_resources.WorkingSet([])
list(map(ws.add, installed_dists))
resolve_call = functools.partial(
ws.resolve,
requirements,
installer=FakeInstaller(installable_dists),
replace_conflicting=replace_conflicting,
)
if inspect.isclass(resolved_dists_or_exception):
with pytest.raises(resolved_dists_or_exception):
resolve_call()
else:
assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)
| FakeInstaller |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_dynamic_line_width.py | {
"start": 8250,
"end": 10301
} | class ____:
def example2():
"""
Regular docstring of class method.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
"""
# See: https://github.com/astral-sh/ruff/issues/9126
def doctest_extra_indent3():
"""
Pragma comment.
Examples
--------
>>> af1, af2, af3 = pl.align_frames(
... df1, df2, df3, on="dt"
... ) # doctest: +IGNORE_RESULT
"""
# See https://github.com/astral-sh/ruff/issues/13358
def length_doctest():
"""Get the length of the given list of numbers.
Args:
numbers: List of numbers.
Returns:
Integer length of the list of numbers.
Example:
>>> length([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
20
"""
def length_doctest_underindent():
"""Get the length of the given list of numbers.
Args:
numbers: List of numbers.
Returns:
Integer length of the list of numbers.
Example:
>>> length([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
20
"""
# See https://github.com/astral-sh/ruff/issues/13358
def length_markdown():
"""Get the length of the given list of numbers.
Args:
numbers: List of numbers.
Returns:
Integer length of the list of numbers.
Example:
```
length([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21])
```
"""
# See https://github.com/astral-sh/ruff/issues/13358
def length_rst():
"""
Do cool stuff::
length([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21])
"""
pass
# See https://github.com/astral-sh/ruff/issues/13358
def length_rst_in_section():
"""
Examples:
Do cool stuff::
length([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
"""
pass
| DoctestExtraIndent2 |
python | tensorflow__tensorflow | third_party/xla/xla/python_api/xla_shape.py | {
"start": 801,
"end": 5042
} | class ____(object):
"""Wraps a xla_data_pb2.ShapeProto message with a convenient Python type.
Provides direct access to the underlying xla_data_pb2.ShapeProto message in
the
message attribute, along with accessor wrappers to the message's fields.
Avoid direct access to .message unless interacting directly with protobuf APIs
like CopyFrom. In other words, prefer hauling the shape around in a Shape, and
only access .message when strictly required by the protobuf API.
"""
def __init__(self, element_type, dimensions, layout=None):
"""Creates a new XLA Shape.
Args:
element_type: element type from xla_data_pb2.
dimensions: sequence of dimensions sizes (integers), or sequence
of Shapes in the case of a tuple, i.e. when element_type is
TUPLE.
layout: optional minor_to_major sequence for layout. If not given, the
default major-to-minor layout is used.
Raises:
ValueError: if element_type is TUPLE but dimensions are not Shape objects.
"""
self.message = xla_data_pb2.ShapeProto()
self.message.element_type = element_type
if element_type == xla_data_pb2.TUPLE:
if not all(isinstance(subshape, Shape) for subshape in dimensions):
raise ValueError(
'XLA tuple requires sequence of Shape objects as dimensions')
self._tuple_shapes = tuple(dimensions)
for component_shape in self._tuple_shapes:
component_message = self.message.tuple_shapes.add()
component_message.CopyFrom(component_shape.message)
else:
self.message.dimensions.extend(dimensions)
if layout is None:
layout = list(reversed(range(len(dimensions))))
self.message.layout.minor_to_major.extend(layout)
def element_type(self):
return self.message.element_type
def is_tuple(self):
return self.element_type() == xla_data_pb2.TUPLE
def dimensions(self):
if self.is_tuple():
raise ValueError('Tuple shape has no dimensions. Try tuple_shapes()?')
return self.message.dimensions
def tuple_shapes(self):
"""If this is a tuple, returns its sequence of constituent Shape objects.
Returns:
Tuple sub-shapes.
Raises:
ValueError: if this is not a tuple.
"""
if not self.is_tuple():
raise ValueError('tuple_shapes() called on a non-tuple shape')
return self._tuple_shapes
def layout(self):
return self.message.layout
@staticmethod
def from_pyval(pyval):
return CreateShapeFromNumpy(pyval)
def _CreateShapeFromNumpy(ndarray): # pylint: disable=invalid-name
"""Create a Shape from a given Numpy array.
Args:
ndarray: Numpy array.
Returns:
A Shape object.
"""
element_type = types_.MAP_DTYPE_TO_RECORD[str(ndarray.dtype)].primitive_type
dimensions = ndarray.shape
# Set the shape's layout based on the ordering of ndarray.
# Numpy arrays come in two orders: Fortran (column-major) and C (row-major).
if _np.isfortran(ndarray):
# Column-major layout. This corresponds to a "dimension order is
# minor-to-major" layout in XLA.
layout = range(ndarray.ndim)
else:
# Row-major layout. This corresponds to a "dimension order is
# major-to-minor" layout int XLA.
layout = list(reversed(range(ndarray.ndim)))
return Shape(element_type, dimensions, layout)
def CreateShapeFromNumpy(value): # pylint: disable=invalid-name
"""Create a Shape from a Numpy array or a nested tuple structure thereof.
Args:
value: Numpy array or (possibly nested) tuple structure that bottoms out in
Numpy arrays.
Returns:
A Shape object.
"""
if isinstance(value, tuple):
return Shape(
xla_data_pb2.TUPLE,
[CreateShapeFromNumpy(component) for component in value])
else:
return _CreateShapeFromNumpy(value)
def CreateShapeFromDtypeAndTuple(dtype, shape_tuple): # pylint: disable=invalid-name
"""Create a shape from a Numpy dtype and a sequence of nonnegative integers.
Args:
dtype: a numpy dtype, e.g. np.dtype('int32').
shape_tuple: a sequence of nonnegative integers.
Returns:
A Shape object.
"""
element_type = types_.MAP_DTYPE_TO_RECORD[str(dtype)].primitive_type
return Shape(element_type, shape_tuple)
| Shape |
python | ray-project__ray | python/ray/experimental/channel/cpu_communicator.py | {
"start": 3351,
"end": 6791
} | class ____(Communicator):
"""
Uses a CPU-based communicator actor instead of an accelerator group like NCCL.
"""
def __init__(self, world_size: int, actor_handles: List["ray.actor.ActorHandle"]):
"""We use the op index to synchronize the sender and receiver at the
communicator actor."""
self._world_size = world_size
self._actor_handles = actor_handles
self.num_ops = defaultdict(int)
# For collective communication, one barrier will be created for
# each unique group of participants.
self.barriers = set()
self._rank = None
def send(self, tensor: "torch.Tensor", peer_rank: int):
# p2p operations are done via a shared memory channel, initialized in
# `create_channel` of `TorchTensorType`
pass
def recv(
self,
shape: Tuple[int],
dtype: "torch.dtype",
peer_rank: int,
allocator: Optional[TorchTensorAllocator] = None,
):
# See the comment on `send`
pass
def allgather(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
):
raise NotImplementedError
def allreduce(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
op: ReduceOp = ReduceOp.SUM,
):
all_ranks = [
self.get_rank(actor_handle) for actor_handle in self.get_actor_handles()
]
barrier_key = "barrier-collective-" + "-".join(map(str, sorted(all_ranks)))
barrier = CPUCommBarrier.options(name=barrier_key, get_if_exists=True).remote(
self._world_size
)
self.barriers.add(barrier)
result = ray.get(
barrier.wait_collective.remote(self.num_ops[barrier_key], send_buf, op)
)
assert recv_buf is not None, "Receiving buffer required for CPUCommunicator"
recv_buf[:] = result[:]
self.num_ops[barrier_key] += 1
def reducescatter(
self,
send_buf: "torch.Tensor",
recv_buf: "torch.Tensor",
op: ReduceOp = ReduceOp.SUM,
):
raise NotImplementedError
def destroy(self) -> None:
for barrier in self.barriers:
ray.kill(barrier)
def initialize(self, rank: int) -> None:
self._rank = rank
def get_actor_handles(self) -> List["ray.actor.ActorHandle"]:
return self._actor_handles
def get_rank(self, actor: ray.actor.ActorHandle) -> int:
"""
Return the given actor's rank in the CPU communicator.
Args:
actor: The actor handle to look up.
"""
actor_ids = [a._ray_actor_id for a in self._actor_handles]
try:
rank = actor_ids.index(actor._ray_actor_id)
except ValueError:
raise ValueError("Actor is not in the CPUCommunicator group.")
return rank
def get_self_rank(self) -> Optional[int]:
return self._rank
def get_world_size(self) -> int:
"""
Return the number of ranks in the CPU communicator.
"""
return self._world_size
def get_transport_name(self) -> str:
return "cpu"
def recv_stream(self):
raise NotImplementedError
def send_stream(self):
raise NotImplementedError
@classmethod
def generate_communicator_id(cls) -> str:
import uuid
return str(uuid.uuid4())
| CPUCommunicator |
python | patrick-kidger__equinox | equinox/internal/_loop/common.py | {
"start": 10696,
"end": 11863
} | class ____(Module):
# annotation removed because beartype can't handle the forward reference.
_array: Any # Union[Shaped[Array, "..."], _Buffer]
_pred: Bool[Array, ""]
_tag: object = field(static=True)
_makes_false_steps: bool = field(static=True)
def __getitem__(self, item):
return self._array[item]
def _op(self, pred, item, x, op, kwargs, makes_false_steps):
pred = pred & self._pred
if isinstance(self._array, _Buffer):
array = self._array._op(pred, item, x, op, kwargs, makes_false_steps)
else:
array = op(
pred,
self._array,
x,
item,
kwargs=kwargs,
makes_false_steps=makes_false_steps,
)
return _Buffer(array, self._pred, self._tag, self._makes_false_steps)
@property
def at(self):
return _BufferAt(self, self._makes_false_steps)
@property
def shape(self):
return self._array.shape
@property
def dtype(self):
return self._array.dtype
@property
def size(self):
return self._array.size
| _Buffer |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_jobs_delete.py | {
"start": 853,
"end": 1330
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": obj.id,
"dateCreated": obj.date_added,
"dateUpdated": obj.date_updated,
"rangeStart": obj.range_start,
"rangeEnd": obj.range_end,
"environments": obj.environments,
"status": obj.status,
"query": obj.query,
"countDeleted": obj.offset,
}
| ReplayDeletionJobSerializer |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-resume-screener/llama_index/packs/resume_screener/base.py | {
"start": 1699,
"end": 2701
} | class ____(BaseLlamaPack):
def __init__(
self, job_description: str, criteria: List[str], llm: Optional[LLM] = None
) -> None:
self.reader = PDFReader()
llm = llm or OpenAI(model="gpt-4")
Settings.llm = llm
self.synthesizer = TreeSummarize(output_cls=ResumeScreenerDecision)
criteria_str = _format_criteria_str(criteria)
self.query = QUERY_TEMPLATE.format(
job_description=job_description, criteria_str=criteria_str
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {"reader": self.reader, "synthesizer": self.synthesizer}
def run(self, resume_path: str, *args: Any, **kwargs: Any) -> Any:
"""Run pack."""
docs = self.reader.load_data(Path(resume_path))
output = self.synthesizer.synthesize(
query=self.query,
nodes=[NodeWithScore(node=doc, score=1.0) for doc in docs],
)
return output.response
| ResumeScreenerPack |
python | getsentry__sentry | src/sentry/api/serializers/models/apiauthorization.py | {
"start": 233,
"end": 1126
} | class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
apps = {
d["id"]: d
for d in serialize({i.application for i in item_list if i.application_id}, user)
}
attrs = {}
for item in item_list:
attrs[item] = {
"application": (apps.get(item.application.client_id) if item.application else None)
}
return attrs
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": str(obj.id),
"scopes": obj.get_scopes(),
"application": attrs["application"],
"dateCreated": obj.date_added,
"organization": (
organization_service.serialize_organization(id=obj.organization_id)
if obj.organization_id
else None
),
}
| ApiAuthorizationSerializer |
python | doocs__leetcode | solution/0500-0599/0592.Fraction Addition and Subtraction/Solution.py | {
"start": 0,
"end": 617
} | class ____:
def fractionAddition(self, expression: str) -> str:
x, y = 0, 6 * 7 * 8 * 9 * 10
if expression[0].isdigit():
expression = '+' + expression
i, n = 0, len(expression)
while i < n:
sign = -1 if expression[i] == '-' else 1
i += 1
j = i
while j < n and expression[j] not in '+-':
j += 1
s = expression[i:j]
a, b = s.split('/')
x += sign * int(a) * y // int(b)
i = j
z = gcd(x, y)
x //= z
y //= z
return f'{x}/{y}'
| Solution |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-yi/llama_index/llms/yi/base.py | {
"start": 1095,
"end": 6467
} | class ____(OpenAI):
"""
Yi LLM.
Examples:
`pip install llama-index-llms-yi`
```python
from llama_index.llms.yi import Yi
# get api key from: https://platform.01.ai/
llm = Yi(model="yi-large", api_key="YOUR_API_KEY")
response = llm.complete("Hi, who are you?")
print(response)
```
"""
model: str = Field(default=DEFAULT_YI_MODEL, description="The Yi model to use.")
context_window: int = Field(
default=yi_modelname_to_context_size(DEFAULT_YI_MODEL),
description=LLMMetadata.model_fields["context_window"].description,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
is_function_calling_model: bool = Field(
default=False,
description=LLMMetadata.model_fields["is_function_calling_model"].description,
)
tokenizer: Union[Tokenizer, str, None] = Field(
default=None,
description=(
"An instance of a tokenizer object that has an encode method, or the name"
" of a tokenizer model from Hugging Face. If left as None, then this"
" disables inference of max_tokens."
),
)
def __init__(
self,
model: str = DEFAULT_YI_MODEL,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_YI_ENDPOINT,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("YI_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
**kwargs,
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens or -1,
is_chat_model=self.is_chat_model,
is_function_calling_model=self.is_function_calling_model,
model_name=self.model,
)
@property
def _tokenizer(self) -> Optional[Tokenizer]:
if isinstance(self.tokenizer, str):
return AutoTokenizer.from_pretrained(self.tokenizer)
return self.tokenizer
@classmethod
def class_name(cls) -> str:
return "Yi_LLM"
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Complete the prompt."""
if not formatted:
prompt = self.completion_to_prompt(prompt)
return super().complete(prompt, **kwargs)
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
"""Stream complete the prompt."""
if not formatted:
prompt = self.completion_to_prompt(prompt)
return super().stream_complete(prompt, **kwargs)
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
"""Chat with the model."""
if not self.metadata.is_chat_model:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
return super().chat(messages, **kwargs)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if not self.metadata.is_chat_model:
prompt = self.messages_to_prompt(messages)
completion_response = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response)
return super().stream_chat(messages, **kwargs)
# -- Async methods --
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Complete the prompt."""
if not formatted:
prompt = self.completion_to_prompt(prompt)
return await super().acomplete(prompt, **kwargs)
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
"""Stream complete the prompt."""
if not formatted:
prompt = self.completion_to_prompt(prompt)
return await super().astream_complete(prompt, **kwargs)
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
"""Chat with the model."""
if not self.metadata.is_chat_model:
prompt = self.messages_to_prompt(messages)
completion_response = await self.acomplete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
return await super().achat(messages, **kwargs)
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
if not self.metadata.is_chat_model:
prompt = self.messages_to_prompt(messages)
completion_response = await self.astream_complete(
prompt, formatted=True, **kwargs
)
return async_stream_completion_response_to_chat_response(
completion_response
)
return await super().astream_chat(messages, **kwargs)
| Yi |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_posterization.py | {
"start": 243,
"end": 5167
} | class ____(BaseImagePreprocessingLayer):
"""Reduces the number of bits for each color channel.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
References:
- [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501)
- [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`. Defaults to `(0, 255)`.
factor: integer, the number of bits to keep for each channel. Must be a
value between 1-8.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (1, 8)
_MAX_FACTOR = 8
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
factor,
value_range=(0, 255),
data_format=None,
seed=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self._set_value_range(value_range)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
batch_size = 1
elif rank == 4:
batch_size = images_shape[0]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received: "
f"inputs.shape={images_shape}"
)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
if self.factor[0] != self.factor[1]:
factor = self.backend.random.randint(
(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
dtype="uint8",
)
else:
factor = (
self.backend.numpy.ones((batch_size,), dtype="uint8")
* self.factor[0]
)
shift_factor = self._MAX_FACTOR - factor
return {"shift_factor": shift_factor}
def transform_images(self, images, transformation=None, training=True):
if training:
shift_factor = transformation["shift_factor"]
shift_factor = self.backend.numpy.reshape(
shift_factor, self.backend.shape(shift_factor) + (1, 1, 1)
)
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, "uint8")
images = self.backend.numpy.bitwise_left_shift(
self.backend.numpy.bitwise_right_shift(images, shift_factor),
shift_factor,
)
images = self.backend.cast(images, self.compute_dtype)
images = self._transform_value_range(
images,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| RandomPosterization |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/activation.py | {
"start": 5031,
"end": 5885
} | class ____(torch.nn.Sigmoid):
r"""This is the quantized equivalent of :class:`~torch.nn.Sigmoid`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
"""
def __init__(self, output_scale: float, output_zero_point: int):
super().__init__()
self.output_scale = output_scale
self.output_zero_point = output_zero_point
def forward(self, input):
return torch.ops.quantized.sigmoid(
input, self.output_scale, self.output_zero_point
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
(
output_scale,
output_zero_point,
) = mod.activation_post_process.calculate_qparams()
return cls(float(output_scale), int(output_zero_point))
| Sigmoid |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/complete_orm_no_plugin.py | {
"start": 701,
"end": 1260
} | class ____(Base):
__table__ = Table(
"a",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("data", String),
)
__mapper_args__: Mapping[str, Any] = {
"properties": {"bs": relationship("B")}
}
id: Mapped[int]
data: Mapped[str]
bs: "Mapped[List[B]]"
def __init__(
self,
id: Optional[int] = None, # noqa: A002
data: Optional[str] = None,
bs: "Optional[List[B]]" = None,
):
self.registry.constructor(self, id=id, data=data, bs=bs)
| A |
python | pallets__quart | src/quart/wrappers/response.py | {
"start": 2928,
"end": 3607
} | class ____(ResponseBody):
def __init__(self, iterable: AsyncIterable[Any] | Iterable[Any]) -> None:
self.iter: AsyncIterator[Any]
if isinstance(iterable, Iterable):
self.iter = run_sync_iterable(iter(iterable))
else:
self.iter = iterable.__aiter__() # Can't use aiter() until 3.10
async def __aenter__(self) -> IterableBody:
return self
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None:
if hasattr(self.iter, "aclose"):
await self.iter.aclose()
def __aiter__(self) -> AsyncIterator[Any]:
return self.iter
| IterableBody |
python | apache__thrift | test/py/TestClient.py | {
"start": 13710,
"end": 13888
} | class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
return make_pedantic(TBinaryProtocol.TBinaryProtocolFactory().getProtocol(transport))
| BinaryTest |
python | huggingface__transformers | src/transformers/models/mobilevit/modeling_mobilevit.py | {
"start": 22532,
"end": 25541
} | class ____(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig, expand_output: bool = True):
r"""
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model using a 1x1 convolution. If `True`, the model will apply an additional
1x1 convolution to expand the output channels from `config.neck_hidden_sizes[5]` to `config.neck_hidden_sizes[6]`.
"""
super().__init__(config)
self.config = config
self.expand_output = expand_output
self.conv_stem = MobileViTConvLayer(
config,
in_channels=config.num_channels,
out_channels=config.neck_hidden_sizes[0],
kernel_size=3,
stride=2,
)
self.encoder = MobileViTEncoder(config)
if self.expand_output:
self.conv_1x1_exp = MobileViTConvLayer(
config,
in_channels=config.neck_hidden_sizes[5],
out_channels=config.neck_hidden_sizes[6],
kernel_size=1,
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.conv_stem(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.expand_output:
last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
# global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
else:
last_hidden_state = encoder_outputs[0]
pooled_output = None
if not return_dict:
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
return output + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
| MobileViTModel |
python | walkccc__LeetCode | solutions/2952. Minimum Number of Coins to be Added/2952.py | {
"start": 0,
"end": 510
} | class ____:
# Same as 330. Patching Array
def minimumAddedCoins(self, coins: list[int], target: int) -> int:
ans = 0
i = 0 # coins' index
miss = 1 # the minimum sum in [1, n] we might miss
coins.sort()
while miss <= target:
if i < len(coins) and coins[i] <= miss:
miss += coins[i]
i += 1
else:
# Greedily add `miss` itself to increase the range from
# [1, miss) to [1, 2 * miss).
miss += miss
ans += 1
return ans
| Solution |
python | PrefectHQ__prefect | src/prefect/blocks/abstract.py | {
"start": 6085,
"end": 11643
} | class ____(Block, ABC):
"""
An abstract block type that represents a database and
provides an interface for interacting with it.
Blocks that implement this interface have the option to accept
credentials directly via attributes or via a nested `CredentialsBlock`.
Use of a nested credentials block is recommended unless credentials
are tightly coupled to database connection configuration.
Implementing either sync or async context management on `DatabaseBlock`
implementations is recommended.
"""
@property
def logger(self) -> LoggerOrAdapter:
"""
Returns a logger based on whether the DatabaseBlock
is called from within a flow or task run context.
If a run context is present, the logger property returns a run logger.
Else, it returns a default logger labeled with the class's name.
Returns:
The run logger or a default logger with the class's name.
"""
try:
return get_run_logger()
except MissingContextError:
return get_logger(self.__class__.__name__)
@abstractmethod
async def fetch_one(
self,
operation: str,
parameters: dict[str, Any] | None = None,
**execution_kwargs: Any,
) -> tuple[Any, ...]:
"""
Fetch a single result from the database.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
**execution_kwargs: Additional keyword arguments to pass to execute.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
"""
@abstractmethod
async def fetch_many(
self,
operation: str,
parameters: dict[str, Any] | None = None,
size: int | None = None,
**execution_kwargs: Any,
) -> list[tuple[Any, ...]]:
"""
Fetch a limited number of results from the database.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
size: The number of results to return.
**execution_kwargs: Additional keyword arguments to pass to execute.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
"""
@abstractmethod
async def fetch_all(
self,
operation: str,
parameters: dict[str, Any] | None = None,
**execution_kwargs: Any,
) -> list[tuple[Any, ...]]:
"""
Fetch all results from the database.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
**execution_kwargs: Additional keyword arguments to pass to execute.
Returns:
A list of tuples containing the data returned by the database,
where each row is a tuple and each column is a value in the tuple.
"""
@abstractmethod
async def execute(
self,
operation: str,
parameters: dict[str, Any] | None = None,
**execution_kwargs: Any,
) -> None:
"""
Executes an operation on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Args:
operation: The SQL query or other operation to be executed.
parameters: The parameters for the operation.
**execution_kwargs: Additional keyword arguments to pass to execute.
"""
@abstractmethod
async def execute_many(
self,
operation: str,
seq_of_parameters: list[dict[str, Any]],
**execution_kwargs: Any,
) -> None:
"""
Executes multiple operations on the database. This method is intended to be used
for operations that do not return data, such as INSERT, UPDATE, or DELETE.
Args:
operation: The SQL query or other operation to be executed.
seq_of_parameters: The sequence of parameters for the operation.
**execution_kwargs: Additional keyword arguments to pass to execute.
"""
# context management methods are not abstract methods because
# they are not supported by all database drivers
async def __aenter__(self) -> Self:
"""
Context management method for async databases.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not support async context management."
)
async def __aexit__(self, *args: Any) -> None:
"""
Context management method for async databases.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not support async context management."
)
def __enter__(self) -> Self:
"""
Context management method for databases.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not support context management."
)
def __exit__(self, *args: Any) -> None:
"""
Context management method for databases.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not support context management."
)
| DatabaseBlock |
python | spack__spack | lib/spack/spack/directory_layout.py | {
"start": 13668,
"end": 13853
} | class ____(SpackError):
"""Superclass for directory layout errors."""
def __init__(self, message, long_msg=None):
super().__init__(message, long_msg)
| DirectoryLayoutError |
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/core_models.py | {
"start": 2506,
"end": 4124
} | class ____(Resolvable, Model):
type: Literal["static"] = "static"
partition_keys: Sequence[str]
def resolve_partitions_def(context: ResolutionContext, model) -> Optional[PartitionsDefinition]:
if model is None:
return None
elif model.type == "hourly":
return HourlyPartitionsDefinition(
start_date=model.start_date,
end_date=model.end_date,
timezone=model.timezone,
minute_offset=model.minute_offset,
)
elif model.type == "daily":
return DailyPartitionsDefinition(
start_date=model.start_date,
end_date=model.end_date,
timezone=model.timezone,
minute_offset=model.minute_offset,
hour_offset=model.hour_offset,
)
elif model.type == "weekly":
return WeeklyPartitionsDefinition(
start_date=model.start_date,
end_date=model.end_date,
timezone=model.timezone,
minute_offset=model.minute_offset,
hour_offset=model.hour_offset,
day_offset=model.day_offset,
)
elif model.type == "time_window":
return TimeWindowPartitionsDefinition(
start=model.start_date,
end=model.end_date,
timezone=model.timezone,
fmt=model.fmt,
cron_schedule=model.cron_schedule,
)
elif model.type == "static":
return StaticPartitionsDefinition(partition_keys=model.partition_keys)
else:
raise ValueError(f"Invalid partitions definition type: {model.type}")
| StaticPartitionsDefinitionModel |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/control.py | {
"start": 5388,
"end": 13378
} | class ____:
def __init__(
self,
data: ConjectureData,
*,
is_final: bool = False,
wrapped_test: Callable,
) -> None:
self.data = data
self.tasks: list[Callable[[], Any]] = []
self.is_final = is_final
self.wrapped_test = wrapped_test
# Use defaultdict(list) here to handle the possibility of having multiple
# functions registered for the same object (due to caching, small ints, etc).
# The printer will discard duplicates which return different representations.
self.known_object_printers: dict[IDKey, list[PrettyPrintFunction]] = (
defaultdict(list)
)
def record_call(
self,
obj: object,
func: object,
*,
args: Sequence[object],
kwargs: dict[str, object],
) -> None:
self.known_object_printers[IDKey(obj)].append(
# _func=func prevents mypy from inferring lambda type. Would need
# paramspec I think - not worth it.
lambda obj, p, cycle, *, _func=func: p.maybe_repr_known_object_as_call( # type: ignore
obj, cycle, get_pretty_function_description(_func), args, kwargs
)
)
def prep_args_kwargs_from_strategies(self, kwarg_strategies):
arg_labels = {}
kwargs = {}
for k, s in kwarg_strategies.items():
start_idx = len(self.data.nodes)
with deprecate_random_in_strategy("from {}={!r}", k, s):
obj = self.data.draw(s, observe_as=f"generate:{k}")
end_idx = len(self.data.nodes)
kwargs[k] = obj
# This high up the stack, we can't see or really do much with the conjecture
# Example objects - not least because they're only materialized after the
# test case is completed. Instead, we'll stash the (start_idx, end_idx)
# pair on our data object for the ConjectureRunner engine to deal with, and
# pass a dict of such out so that the pretty-printer knows where to place
# the which-parts-matter comments later.
if start_idx != end_idx:
arg_labels[k] = (start_idx, end_idx)
self.data.arg_slices.add((start_idx, end_idx))
return kwargs, arg_labels
def __enter__(self):
self.assign_variable = _current_build_context.with_value(self)
self.assign_variable.__enter__()
return self
def __exit__(self, exc_type, exc_value, tb):
self.assign_variable.__exit__(exc_type, exc_value, tb)
errors = []
for task in self.tasks:
try:
task()
except BaseException as err:
errors.append(err)
if errors:
if len(errors) == 1:
raise errors[0] from exc_value
raise BaseExceptionGroup("Cleanup failed", errors) from exc_value
def cleanup(teardown):
"""Register a function to be called when the current test has finished
executing. Any exceptions thrown in teardown will be printed but not
rethrown.
Inside a test this isn't very interesting, because you can just use
a finally block, but note that you can use this inside map, flatmap,
etc. in order to e.g. insist that a value is closed at the end.
"""
context = _current_build_context.value
if context is None:
raise InvalidArgument("Cannot register cleanup outside of build context")
context.tasks.append(teardown)
def should_note():
context = _current_build_context.value
if context is None:
raise InvalidArgument("Cannot make notes outside of a test")
return context.is_final or settings.default.verbosity >= Verbosity.verbose
def note(value: object) -> None:
"""Report this value for the minimal failing example."""
if should_note():
if not isinstance(value, str):
value = pretty(value)
report(value)
def event(value: str, payload: str | int | float = "") -> None:
"""Record an event that occurred during this test. Statistics on the number of test
runs with each event will be reported at the end if you run Hypothesis in
statistics reporting mode.
Event values should be strings or convertible to them. If an optional
payload is given, it will be included in the string for :ref:`statistics`.
"""
context = _current_build_context.value
if context is None:
raise InvalidArgument("Cannot make record events outside of a test")
payload = _event_to_string(payload, (str, int, float))
context.data.events[_event_to_string(value)] = payload
_events_to_strings: WeakKeyDictionary = WeakKeyDictionary()
def _event_to_string(event, allowed_types=str):
if isinstance(event, allowed_types):
return event
try:
return _events_to_strings[event]
except (KeyError, TypeError):
pass
result = str(event)
try:
_events_to_strings[event] = result
except TypeError:
pass
return result
def target(observation: int | float, *, label: str = "") -> int | float:
    """Calling this function with an ``int`` or ``float`` observation gives it feedback
    with which to guide our search for inputs that will cause an error, in
    addition to all the usual heuristics. Observations must always be finite.
    Hypothesis will try to maximize the observed value over several examples;
    almost any metric will work so long as it makes sense to increase it.
    For example, ``-abs(error)`` is a metric that increases as ``error``
    approaches zero.
    Example metrics:
    - Number of elements in a collection, or tasks in a queue
    - Mean or maximum runtime of a task (or both, if you use ``label``)
    - Compression ratio for data (perhaps per-algorithm or per-level)
    - Number of steps taken by a state machine
    The optional ``label`` argument can be used to distinguish between
    and therefore separately optimise distinct observations, such as the
    mean and standard deviation of a dataset. It is an error to call
    ``target()`` with any label more than once per test case.
    .. note::
        The more examples you run, the better this technique works.
        As a rule of thumb, the targeting effect is noticeable above
        :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,
        and immediately obvious by around ten thousand examples
        *per label* used by your test.
    :ref:`statistics` include the best score seen for each label,
    which can help avoid `the threshold problem
    <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal
    example shrinks right down to the threshold of failure (:issue:`2180`).
    """
    # Validate arguments before touching any per-test-run state.
    check_type((int, float), observation, "observation")
    if not math.isfinite(observation):
        raise InvalidArgument(f"{observation=} must be a finite float.")
    check_type(str, label, "label")
    context = _current_build_context.value
    if context is None:
        # target() only makes sense while Hypothesis is executing a test body.
        raise InvalidArgument(
            "Calling target() outside of a test is invalid. "
            "Consider guarding this call with `if currently_in_test_context(): ...`"
        )
    elif context.data.provider.avoid_realization:
        # We could in principle realize this in the engine, but it seems more
        # efficient to have our alternative backend optimize it for us.
        # See e.g. https://github.com/pschanely/hypothesis-crosshair/issues/3
        return observation  # pragma: no cover
    verbose_report(f"Saw target({observation!r}, {label=})")
    # Each label may record at most one observation per test case; a second
    # call with the same label is an error rather than a silent overwrite.
    if label in context.data.target_observations:
        raise InvalidArgument(
            f"Calling target({observation!r}, {label=}) would overwrite "
            f"target({context.data.target_observations[label]!r}, {label=})"
        )
    else:
        context.data.target_observations[label] = observation
    # Return the observation so target() can be used inline in expressions.
    return observation
| BuildContext |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 4686,
"end": 4892
} | class ____(ClientConnectorError):
"""DNS resolution failed during client connection.
Raised in :class:`aiohttp.connector.TCPConnector` if
DNS resolution fails.
"""
| ClientConnectorDNSError |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 56321,
"end": 56995
} | class ____(test_util.TensorFlowTestCase):
def testCastWithFullType(self):
@def_function.function
def test_fn():
ta = tensor_array_ops.TensorArray(dtypes.int32, size=1)
h = math_ops.cast(ta.flow, dtypes.variant)
t = full_type_pb2.FullTypeDef(
type_id=full_type_pb2.TFT_PRODUCT,
args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ARRAY)])
h.op.experimental_set_type(t)
ta = tensor_array_ops.TensorArray(dtypes.int32, flow=h)
ta = ta.write(0, constant_op.constant(1))
return ta.stack()
self.assertAllEqual(self.evaluate(test_fn()), [1])
if __name__ == "__main__":
googletest.main()
| CastTest |
python | redis__redis-py | tests/test_retry.py | {
"start": 4425,
"end": 5737
} | class ____:
"Test that Retry calls backoff and retries the expected number of times"
def setup_method(self, test_method):
self.actual_attempts = 0
self.actual_failures = 0
def _do(self):
self.actual_attempts += 1
raise ConnectionError()
def _fail(self, error):
self.actual_failures += 1
def _fail_inf(self, error):
self.actual_failures += 1
if self.actual_failures == 5:
raise ConnectionError()
@pytest.mark.parametrize("retries", range(10))
def test_retry(self, retries):
backoff = BackoffMock()
retry = Retry(backoff, retries)
with pytest.raises(ConnectionError):
retry.call_with_retry(self._do, self._fail)
assert self.actual_attempts == 1 + retries
assert self.actual_failures == 1 + retries
assert backoff.reset_calls == 1
assert backoff.calls == retries
def test_infinite_retry(self):
backoff = BackoffMock()
# specify infinite retries, but give up after 5
retry = Retry(backoff, -1)
with pytest.raises(ConnectionError):
retry.call_with_retry(self._do, self._fail_inf)
assert self.actual_attempts == 5
assert self.actual_failures == 5
@pytest.mark.onlynoncluster
| TestRetry |
python | davidhalter__jedi | test/completion/pep0484_generic_parameters.py | {
"start": 7397,
"end": 7676
} | class ____(Specialised):
pass
child_of_specialised_instance: ChildOfSpecialised = NotImplemented
#? int()
first(child_of_specialised_instance)
#? str()
values(child_of_specialised_instance)[0]
# Test that unbound generics are inferred as much as possible
| ChildOfSpecialised |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_details.py | {
"start": 2873,
"end": 3364
} | class ____:
def has_scope(self, scope):
# For the "test_as_no_org_read_user" we need a set of scopes that allows GET on the
# OrganizationDetailsEndpoint to allow high-level access, but without "org:read" scope
# to cover that branch with test. The scope "org:write" is a good candidate for this.
if scope == "org:write":
return True
return False
@region_silo_test(regions=create_test_regions("us"), include_monolith_run=True)
| MockAccess |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/dock_scroll.py | {
"start": 80,
"end": 531
} | class ____(App):
BINDINGS = [("ctrl+q", "app.quit", "Quit")]
CSS = """
Label {
border: solid red;
}
Footer {
height: 4;
}
"""
def compose(self):
text = (
"this is a sample sentence and here are some words".replace(" ", "\n") * 2
)
yield Header()
yield Label(text)
yield Footer()
if __name__ == "__main__":
app = TestApp()
app.run()
| TestApp |
python | great-expectations__great_expectations | great_expectations/core/batch.py | {
"start": 19836,
"end": 22421
} | class ____(BatchRequestBase):
"""A BatchRequest is the way to specify which data Great Expectations will validate.
A Batch Request is provided to a Datasource in order to create a Batch.
---Documentation---
- https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_get_one_or_more_batches_of_data_from_a_configured_datasource/#1-construct-a-batchrequest
- https://docs.greatexpectations.io/docs/terms/batch_request
The `data_connector_query` parameter can include an index slice:
```python
{
"index": "-3:"
}
```
or it can include a filter:
```python
{
"batch_filter_parameters": {"year": "2020"}
}
```
Args:
datasource_name: name of the Datasource used to connect to the data
data_connector_name: name of the DataConnector used to connect to the data
data_asset_name: name of the DataAsset used to connect to the data
data_connector_query: a dictionary of query parameters the DataConnector
should use to filter the batches returned from a BatchRequest
limit: if specified, the maximum number of *batches* to be returned
(limit does not affect the number of records in each batch)
batch_spec_passthrough: a dictionary of additional parameters that
the ExecutionEngine will use to obtain a specific set of data
Returns:
BatchRequest
"""
include_field_names: ClassVar[set[str]] = {
"datasource_name",
"data_connector_name",
"data_asset_name",
"data_connector_query",
"limit",
"batch_spec_passthrough",
}
def __init__( # noqa: PLR0913 # FIXME CoP
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
data_connector_query: dict | None = None,
limit: int | None = None,
batch_spec_passthrough: dict | None = None,
) -> None:
self._validate_init_parameters(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
data_connector_query=data_connector_query,
limit=limit,
)
super().__init__(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
data_connector_query=data_connector_query,
limit=limit,
batch_spec_passthrough=batch_spec_passthrough,
)
| BatchRequest |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 5394,
"end": 5450
} | class ____(RollingReduction):
how = "kurt"
| RollingKurt |
python | plotly__plotly.py | plotly/graph_objs/layout/smith/_domain.py | {
"start": 235,
"end": 5045
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.smith"
_path_str = "layout.smith.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this smith subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this smith subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this smith subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this smith subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this smith subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this smith subplot .
x
Sets the horizontal domain of this smith subplot (in
plot fraction).
y
Sets the vertical domain of this smith subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.smith.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this smith subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this smith subplot .
x
Sets the horizontal domain of this smith subplot (in
plot fraction).
y
Sets the vertical domain of this smith subplot (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.smith.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.smith.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | django__django | django/db/models/fields/__init__.py | {
"start": 61073,
"end": 67298
} | class ____(Field):
empty_strings_allowed = False
default_error_messages = {
"invalid": _("“%(value)s” value must be a decimal number."),
}
description = _("Decimal number")
def __init__(
self,
verbose_name=None,
name=None,
max_digits=None,
decimal_places=None,
**kwargs,
):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
if self.decimal_places is None:
if (
not connection.features.supports_no_precision_decimalfield
and "supports_no_precision_decimalfield"
not in self.model._meta.required_db_features
):
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id="fields.E130",
)
]
elif self.max_digits is not None:
return [
checks.Error(
"DecimalField’s max_digits and decimal_places must both "
"be defined or both omitted.",
obj=self,
id="fields.E135",
),
]
else:
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id="fields.E131",
)
]
return []
def _check_max_digits(self):
if self.max_digits is None:
if (
not connection.features.supports_no_precision_decimalfield
and "supports_no_precision_decimalfield"
not in self.model._meta.required_db_features
):
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id="fields.E132",
)
]
elif self.decimal_places is not None:
return [
checks.Error(
"DecimalField’s max_digits and decimal_places must both "
"be defined or both omitted.",
obj=self,
id="fields.E135",
),
]
else:
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id="fields.E133",
)
]
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if self.decimal_places is None or self.max_digits is None:
return []
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id="fields.E134",
)
]
return []
@cached_property
def validators(self):
return [
*super().validators,
validators.DecimalValidator(self.max_digits, self.decimal_places),
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs["max_digits"] = self.max_digits
if self.decimal_places is not None:
kwargs["decimal_places"] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
if isinstance(value, float):
decimal_value = self.context.create_decimal_from_float(value)
else:
decimal_value = decimal.Decimal(value)
except (decimal.InvalidOperation, TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
if not decimal_value.is_finite():
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
return decimal_value
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
return connection.ops.adapt_decimalfield_value(
self.to_python(value), self.max_digits, self.decimal_places
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(
**{
"max_digits": self.max_digits,
"decimal_places": self.decimal_places,
"form_class": forms.DecimalField,
**kwargs,
}
)
| DecimalField |
python | crytic__slither | slither/core/declarations/pragma_directive.py | {
"start": 181,
"end": 1174
} | class ____(SourceMapping):
def __init__(self, directive: List[str], scope: "FileScope") -> None:
super().__init__()
self._directive = directive
self.scope: "FileScope" = scope
self._pattern = "pragma"
@property
def directive(self) -> List[str]:
"""
list(str)
"""
return self._directive
@property
def version(self) -> str:
return "".join(self.directive[1:])
@property
def name(self) -> str:
return self.version
@property
def is_solidity_version(self) -> bool:
if len(self._directive) > 0:
return self._directive[0].lower() == "solidity"
return False
@property
def is_abi_encoder_v2(self) -> bool:
if len(self._directive) == 2:
return self._directive[0] == "experimental" and self._directive[1] == "ABIEncoderV2"
return False
def __str__(self) -> str:
return "pragma " + "".join(self.directive)
| Pragma |
python | encode__httpx | httpx/_exceptions.py | {
"start": 2238,
"end": 2845
} | class ____(HTTPError):
"""
Base class for all exceptions that may occur when issuing a `.request()`.
"""
def __init__(self, message: str, *, request: Request | None = None) -> None:
super().__init__(message)
# At the point an exception is raised we won't typically have a request
# instance to associate it with.
#
# The 'request_context' context manager is used within the Client and
# Response methods in order to ensure that any raised exceptions
# have a `.request` property set on them.
self._request = request
| RequestError |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 24451,
"end": 25010
} | class ____(Dir):
def post_validation(self, config: Config, key_name: str):
if not config.config_file_path:
return
# Validate that the dir is not the parent dir of the config file.
if os.path.dirname(config.config_file_path) == config[key_name]:
raise ValidationError(
f"The '{key_name}' should not be the parent directory of the"
f" config file. Use a child directory instead so that the"
f" '{key_name}' is a sibling of the config file."
)
| DocsDir |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 917,
"end": 1881
} | class ____(PosixCompletionConfig):
"""Pseudo completion config for the origin."""
def __init__(self) -> None:
super().__init__(name='origin')
@property
def supported_pythons(self) -> list[str]:
"""Return a list of the supported Python versions."""
current_version = version_to_str(sys.version_info[:2])
versions = [version for version in SUPPORTED_PYTHON_VERSIONS if version == current_version] + \
[version for version in SUPPORTED_PYTHON_VERSIONS if version != current_version]
return versions
def get_python_path(self, version: str) -> str:
"""Return the path of the requested Python version."""
version = find_python(version)
return version
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return False
@dataclasses.dataclass(frozen=True)
| OriginCompletionConfig |
python | django__django | tests/delete/tests.py | {
"start": 27653,
"end": 33076
} | class ____(TestCase):
def test_fast_delete_all(self):
with self.assertNumQueries(1) as ctx:
User.objects.all().delete()
sql = ctx.captured_queries[0]["sql"]
# No subqueries is used when performing a full delete.
self.assertNotIn("SELECT", sql)
def test_fast_delete_fk(self):
u = User.objects.create(avatar=Avatar.objects.create())
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_instance_set_pk_none(self):
u = User.objects.create()
# User can be fast-deleted.
collector = Collector(using="default")
self.assertTrue(collector.can_fast_delete(u))
u.delete()
self.assertIsNone(u.pk)
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc="a")
User.objects.create(avatar=a)
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(avatar__desc="a").delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
self.assertNumQueries(2, c.delete)
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_fast_delete_large_batch(self):
User.objects.bulk_create(User() for i in range(0, 2000))
# No problems here - we aren't going to cascade, so we will fast
# delete the objects in a single query.
self.assertNumQueries(1, User.objects.all().delete)
a = Avatar.objects.create(desc="a")
User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
# We don't hit parameter amount limits for a, so just one query for
# that + fast delete of the related objs.
self.assertNumQueries(2, a.delete)
self.assertEqual(User.objects.count(), 0)
def test_fast_delete_empty_no_update_can_self_select(self):
"""
Fast deleting when DatabaseFeatures.update_can_self_select = False
works even if the specified filter doesn't match any row (#25932).
"""
with self.assertNumQueries(1):
self.assertEqual(
User.objects.filter(avatar__desc="missing").delete(),
(0, {}),
)
def test_fast_delete_combined_relationships(self):
# The cascading fast-delete of SecondReferrer should be combined
# in a single DELETE WHERE referrer_id OR unique_field.
origin = Origin.objects.create()
referer = Referrer.objects.create(origin=origin, unique_field=42)
with self.assertNumQueries(2):
referer.delete()
def test_fast_delete_aggregation(self):
# Fast-deleting when filtering against an aggregation result in
# a single query containing a subquery.
Base.objects.create()
with self.assertNumQueries(1):
self.assertEqual(
Base.objects.annotate(
rels_count=models.Count("rels"),
)
.filter(rels_count=0)
.delete(),
(1, {"delete.Base": 1}),
)
self.assertIs(Base.objects.exists(), False)
def test_fast_delete_empty_result_set(self):
user = User.objects.create()
with self.assertNumQueries(0):
self.assertEqual(
User.objects.filter(pk__in=[]).delete(),
(0, {}),
)
self.assertSequenceEqual(User.objects.all(), [user])
def test_fast_delete_full_match(self):
avatar = Avatar.objects.create(desc="bar")
User.objects.create(avatar=avatar)
with self.assertNumQueries(1):
User.objects.filter(~Q(pk__in=[]) | Q(avatar__desc="foo")).delete()
self.assertFalse(User.objects.exists())
| FastDeleteTests |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_storage_transfer_service.py | {
"start": 22133,
"end": 31036
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.gct_hook = CloudDataTransferServiceHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook._authorize"
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.build")
def test_gct_client_creation(self, mock_build, mock_authorize):
result = self.gct_hook.get_conn()
mock_build.assert_called_once_with(
"storagetransfer", "v1", http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.gct_hook._conn == result
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_create_transfer_job(self, get_conn, mock_project_id):
create_method = get_conn.return_value.transferJobs.return_value.create
execute_method = create_method.return_value.execute
execute_method.return_value = deepcopy(TEST_TRANSFER_JOB)
res = self.gct_hook.create_transfer_job(body=self._without_project_id(TEST_BODY))
assert res == TEST_TRANSFER_JOB
create_method.assert_called_once_with(body=self._with_project_id(TEST_BODY, "example-project"))
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_get_transfer_job(self, get_conn, mock_project_id):
get_method = get_conn.return_value.transferJobs.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.get_transfer_job(job_name=TEST_TRANSFER_JOB_NAME)
assert res is not None
assert res[NAME] == TEST_TRANSFER_JOB_NAME
get_method.assert_called_once_with(jobName=TEST_TRANSFER_JOB_NAME, projectId="example-project")
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_list_transfer_job(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferJobs.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {TRANSFER_JOBS: [TEST_TRANSFER_JOB]}
list_next = get_conn.return_value.transferJobs.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_job(
request_filter=_without_key(TEST_TRANSFER_JOB_FILTER, FILTER_PROJECT_ID)
)
assert res is not None
assert res == [TEST_TRANSFER_JOB]
list_method.assert_called_once_with(filter=mock.ANY)
args, kwargs = list_method.call_args_list[0]
assert json.loads(kwargs["filter"]) == {
FILTER_PROJECT_ID: "example-project",
FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME],
}
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_update_transfer_job(self, get_conn, mock_project_id):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.update_transfer_job(
job_name=TEST_TRANSFER_JOB_NAME, body=self._without_project_id(TEST_UPDATE_TRANSFER_JOB_BODY)
)
assert res is not None
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body=self._with_project_id(TEST_UPDATE_TRANSFER_JOB_BODY, "example-project"),
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_delete_transfer_job(self, get_conn):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
self.gct_hook.delete_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body={
PROJECT_ID: TEST_PROJECT_ID,
TRANSFER_JOB: {STATUS: "DELETED"},
TRANSFER_JOB_FIELD_MASK: STATUS,
},
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_cancel_transfer_operation(self, get_conn):
cancel_method = get_conn.return_value.transferOperations.return_value.cancel
execute_method = cancel_method.return_value.execute
self.gct_hook.cancel_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
cancel_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_get_transfer_operation(self, get_conn):
get_method = get_conn.return_value.transferOperations.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_OPERATION
res = self.gct_hook.get_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
assert res == TEST_TRANSFER_OPERATION
get_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_list_transfer_operation(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {OPERATIONS: [TEST_TRANSFER_OPERATION]}
list_next = get_conn.return_value.transferOperations.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_operations(
request_filter=_without_key(TEST_TRANSFER_OPERATION_FILTER, FILTER_PROJECT_ID)
)
assert res is not None
assert res == [TEST_TRANSFER_OPERATION]
list_method.assert_called_once_with(filter=mock.ANY, name="transferOperations")
args, kwargs = list_method.call_args_list[0]
assert json.loads(kwargs["filter"]) == {
FILTER_PROJECT_ID: "example-project",
FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME],
}
list_execute_method.assert_called_once_with(num_retries=5)
@staticmethod
def _without_project_id(body):
body = deepcopy(body)
del body[PROJECT_ID]
return body
@staticmethod
def _with_project_id(body, project_id):
body = deepcopy(body)
del body[PROJECT_ID]
body[PROJECT_ID] = project_id
return body
| TestGCPTransferServiceHookWithProjectIdFromConnection |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_bar21.py | {
"start": 315,
"end": 1660
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar21.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [64052224, 64055552]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"categories_data": data[0],
"values_data": data[1],
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"categories_data": data[0],
"values_data": data[2],
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 48535,
"end": 49124
} | class ____(PrefectBaseModel):
healthy: bool = Field(..., description="Whether or not the work queue is healthy.")
late_runs_count: int = Field(
default=0, description="The number of late flow runs in the work queue."
)
last_polled: Optional[DateTime] = Field(
default=None, description="The last time an agent polled this queue for work."
)
health_check_policy: WorkQueueHealthPolicy = Field(
...,
description=(
"The policy used to determine whether or not the work queue is healthy."
),
)
| WorkQueueStatusDetail |
python | joke2k__faker | faker/providers/internet/fa_IR/__init__.py | {
"start": 42,
"end": 328
} | class ____(BaseProvider):
safe_email_tlds = ("com", "net", "ir", "org")
free_email_domains = (
"chmail.ir",
"mailfa.com",
"gmail.com",
"hotmail.com",
"yahoo.com",
)
tlds = ("com", "com", "com", "net", "org", "ir", "ir", "ir")
| Provider |
python | keras-team__keras | keras/src/optimizers/schedules/learning_rate_schedule.py | {
"start": 2460,
"end": 6359
} | class ____(LearningRateSchedule):
"""A `LearningRateSchedule` that uses an exponential decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`keras.optimizers.schedules.serialize` and
`keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A Python float. The initial learning rate.
decay_steps: A Python integer. Must be positive. See the decay
computation above.
decay_rate: A Python float. The decay rate.
staircase: Boolean. If `True` decay the learning rate at discrete
intervals.
name: String. Optional name of the operation. Defaults to
`"ExponentialDecay`".
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar tensor of the
same type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name="ExponentialDecay",
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
if self.decay_steps <= 0:
raise ValueError(
"Argument `decay_steps` must be > 0. "
f"Received: decay_steps={self.decay_steps}"
)
def __call__(self, step):
with ops.name_scope(self.name):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate
)
dtype = initial_learning_rate.dtype
decay_steps = ops.cast(self.decay_steps, dtype)
decay_rate = ops.cast(self.decay_rate, dtype)
global_step_recomp = ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = ops.floor(p)
return ops.multiply(initial_learning_rate, ops.power(decay_rate, p))
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name,
}
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
| ExponentialDecay |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/where_op_test.py | {
"start": 1269,
"end": 8394
} | class ____(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where):
with self.cached_session():
ans = fn(x)
self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim]))
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testWrongNumbers(self, fn=array_ops.where):
with self.session():
with self.assertRaises(ValueError):
fn([False, True], [1, 2], None)
with self.assertRaises(ValueError):
fn([False, True], None, [1, 2])
def _testBasicVec(self, fn=array_ops.where):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandomVec(self, fn=array_ops.where):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth, None, fn)
def _testBasicMat(self, fn=array_ops.where):
x = np.asarray([[True, False], [True, False]])
# Ensure RowMajor mode
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testBasic3Tensor(self, fn=array_ops.where):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandom(self, dtype, expected_err_re=None, fn=array_ops.where):
shape = [127, 33, 53]
x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
self._testWhere(x, truth, expected_err_re, fn)
def _testThreeArgument(self, fn=array_ops.where):
x = np.array([[-2, 3, -1], [1, -3, -3]])
np_val = np.where(x > 0, x * x, -x)
with self.test_session():
tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x))
self.assertAllEqual(tf_val, np_val)
def testWrongNumbers(self):
self._testWrongNumbers()
@test_util.run_deprecated_v1
def testBasicVec(self):
self._testBasicVec()
@test_util.run_deprecated_v1
def testRandomVec(self):
self._testRandomVec()
@test_util.run_deprecated_v1
def testBasicMat(self):
self._testBasicMat()
@test_util.run_deprecated_v1
def testBasic3Tensor(self):
self._testBasic3Tensor()
@test_util.run_deprecated_v1
def testRandomBool(self):
self._testRandom(np.bool_)
@test_util.run_deprecated_v1
def testRandomInt32(self):
self._testRandom(np.int32)
@test_util.run_deprecated_v1
def testRandomInt64(self):
self._testRandom(np.int64)
@test_util.run_deprecated_v1
def testRandomFloat(self):
self._testRandom(np.float32)
@test_util.run_deprecated_v1
def testRandomDouble(self):
self._testRandom(np.float64)
@test_util.run_deprecated_v1
def testRandomComplex64(self):
self._testRandom(np.complex64)
@test_util.run_deprecated_v1
def testRandomComplex128(self):
self._testRandom(np.complex128)
@test_util.run_deprecated_v1
def testRandomUint8(self):
self._testRandom(np.uint8)
@test_util.run_deprecated_v1
def testRandomInt8(self):
self._testRandom(np.int8)
@test_util.run_deprecated_v1
def testRandomInt16(self):
self._testRandom(np.int16)
@test_util.run_deprecated_v1
def testThreeArgument(self):
self._testThreeArgument()
def testV2WrongNumbers(self):
self._testWrongNumbers(array_ops.where_v2)
def testV2BasicVec(self):
self._testBasicVec(array_ops.where_v2)
def testV2RandomVec(self):
self._testRandomVec(array_ops.where_v2)
def testV2BasicMat(self):
self._testBasicMat(array_ops.where_v2)
def testV2Basic3Tensor(self):
self._testBasic3Tensor(array_ops.where_v2)
def testV2RandomBool(self):
self._testRandom(np.bool_, None, array_ops.where_v2)
def testV2RandomInt32(self):
self._testRandom(np.int32, None, array_ops.where_v2)
def testV2RandomInt64(self):
self._testRandom(np.int64, None, array_ops.where_v2)
def testV2RandomFloat(self):
self._testRandom(np.float32, None, array_ops.where_v2)
def testV2RandomDouble(self):
self._testRandom(np.float64, None, array_ops.where_v2)
def testV2RandomComplex64(self):
self._testRandom(np.complex64, None, array_ops.where_v2)
def testV2RandomComplex128(self):
self._testRandom(np.complex128, None, array_ops.where_v2)
def testV2RandomUint8(self):
self._testRandom(np.uint8, None, array_ops.where_v2)
def testV2RandomInt8(self):
self._testRandom(np.int8, None, array_ops.where_v2)
def testV2RandomInt16(self):
self._testRandom(np.int16, None, array_ops.where_v2)
def testV2ThreeArgument(self):
self._testThreeArgument(array_ops.where_v2)
def testV2Broadcasting(self):
f = np.random.normal(0, 1, (3, 5, 1, 1))
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(f < 0, x, y)
with self.test_session():
tf_val = self.evaluate(
array_ops.where_v2(constant_op.constant(f) < 0, x, y))
self.assertAllEqual(tf_val, np_val)
def testV2ScalarBroadcasting(self):
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(True, x, y)
with self.test_session():
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant(True, dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2VectorBroadcasting(self):
x = np.zeros(7)
y = np.ones(7)
np_val = np.where([True], x, y)
with self.test_session():
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant([True], dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2PredBroadcasting(self):
pred = np.array([1, 0, 0]).reshape((3, 1))
x = np.random.randn(3, 4)
y = np.random.randn(3, 4)
np_val = np.where(pred, x, y)
with self.test_session():
tf_val = self.evaluate(array_ops.where_v2(pred, x, y))
self.assertAllClose(tf_val, np_val)
@test_util.run_deprecated_v1
def testBatchSelect(self):
x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192) # [16384, 192]
c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192]
c_vec = np.array([False, True] * 8192) # [16384]
np_val = np.where(c_mat, x * x, -x)
with self.session():
tf_val = array_ops.where(c_vec, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
| WhereOpTest |
python | django__django | tests/requests_tests/tests.py | {
"start": 940,
"end": 1138
} | class ____(MemoryFileUploadHandler):
def handle_raw_input(
self, input_data, META, content_length, boundary, encoding=None
):
return ("_POST", "_FILES")
| CustomFileUploadHandler |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 51744,
"end": 56123
} | class ____(DefinedFunction):
r"""
Andre numbers / Andre function
The Andre number `\mathcal{A}_n` is Luschny's name for half the number of
*alternating permutations* on `n` elements, where a permutation is alternating
if adjacent elements alternately compare "greater" and "smaller" going from
left to right. For example, `2 < 3 > 1 < 4` is an alternating permutation.
This sequence is A000111 in the OEIS, which assigns the names *up/down numbers*
and *Euler zigzag numbers*. It satisfies a recurrence relation similar to that
for the Catalan numbers, with `\mathcal{A}_0 = 1` and
.. math:: 2 \mathcal{A}_{n+1} = \sum_{k=0}^n \binom{n}{k} \mathcal{A}_k \mathcal{A}_{n-k}
The Bernoulli and Euler numbers are signed transformations of the odd- and
even-indexed elements of this sequence respectively:
.. math :: \operatorname{B}_{2k} = \frac{2k \mathcal{A}_{2k-1}}{(-4)^k - (-16)^k}
.. math :: \operatorname{E}_{2k} = (-1)^k \mathcal{A}_{2k}
Like the Bernoulli and Euler numbers, the Andre numbers are interpolated by the
entire Andre function:
.. math :: \mathcal{A}(s) = (-i)^{s+1} \operatorname{Li}_{-s}(i) +
i^{s+1} \operatorname{Li}_{-s}(-i) = \\ \frac{2 \Gamma(s+1)}{(2\pi)^{s+1}}
(\zeta(s+1, 1/4) - \zeta(s+1, 3/4) \cos{\pi s})
Examples
========
>>> from sympy import andre, euler, bernoulli
>>> [andre(n) for n in range(11)]
[1, 1, 1, 2, 5, 16, 61, 272, 1385, 7936, 50521]
>>> [(-1)**k * andre(2*k) for k in range(7)]
[1, -1, 5, -61, 1385, -50521, 2702765]
>>> [euler(2*k) for k in range(7)]
[1, -1, 5, -61, 1385, -50521, 2702765]
>>> [andre(2*k-1) * (2*k) / ((-4)**k - (-16)**k) for k in range(1, 8)]
[1/6, -1/30, 1/42, -1/30, 5/66, -691/2730, 7/6]
>>> [bernoulli(2*k) for k in range(1, 8)]
[1/6, -1/30, 1/42, -1/30, 5/66, -691/2730, 7/6]
See Also
========
bernoulli, catalan, euler, sympy.polys.appellseqs.andre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Alternating_permutation
.. [2] https://mathworld.wolfram.com/EulerZigzagNumber.html
.. [3] Peter Luschny, "An introduction to the Bernoulli function",
https://arxiv.org/abs/2009.06743
"""
@classmethod
def eval(cls, n):
if n is S.NaN:
return S.NaN
elif n is S.Infinity:
return S.Infinity
if n.is_zero:
return S.One
elif n == -1:
return -log(2)
elif n == -2:
return -2*S.Catalan
elif n.is_Integer:
if n.is_nonnegative and n.is_even:
return abs(euler(n))
elif n.is_odd:
from sympy.functions.special.zeta_functions import zeta
m = -n-1
return I**m * Rational(1-2**m, 4**m) * zeta(-n)
def _eval_rewrite_as_zeta(self, s, **kwargs):
from sympy.functions.elementary.trigonometric import cos
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.zeta_functions import zeta
return 2 * gamma(s+1) / (2*pi)**(s+1) * \
(zeta(s+1, S.One/4) - cos(pi*s) * zeta(s+1, S(3)/4))
def _eval_rewrite_as_polylog(self, s, **kwargs):
from sympy.functions.special.zeta_functions import polylog
return (-I)**(s+1) * polylog(-s, I) + I**(s+1) * polylog(-s, -I)
def _eval_is_integer(self):
n = self.args[0]
if n.is_integer and n.is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_nonnegative:
return True
def _eval_evalf(self, prec):
if not self.args[0].is_number:
return
s = self.args[0]._to_mpmath(prec+12)
with workprec(prec+12):
sp, cp = mp.sinpi(s/2), mp.cospi(s/2)
res = 2*mp.dirichlet(-s, (-sp, cp, sp, -cp))
return Expr._from_mpmath(res, prec)
#----------------------------------------------------------------------------#
# #
# Partition numbers #
# #
#----------------------------------------------------------------------------#
| andre |
python | google__jax | tests/custom_api_test.py | {
"start": 118131,
"end": 137684
} | class ____(jtu.JaxTestCase):
def test_basic(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x)
self.assertAllClose(y, jnp.sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, jnp.cos(xs))
@jax.numpy_dtype_promotion('standard')
def test_closure(self):
z = jnp.array([2., 1., 3.])
@jax.custom_batching.custom_vmap
def f(x): return z + jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, *args):
self.assertEqual(len(in_batched), 1)
self.assertEqual(len(args), 1)
xs, = args
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return z + jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x)
self.assertAllClose(y, z + jnp.sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, z + jnp.cos(xs))
def test_rule_multi_output(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x), jnp.cos(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
return (jnp.cos(xs), jnp.sin(xs)), tuple(in_batched * 2)
x, xs = jnp.array(1.), jnp.arange(3)
y1, y2 = f(x)
self.assertAllClose(y1, jnp.sin(x))
self.assertAllClose(y2, jnp.cos(x))
ys1, ys2 = api.vmap(f)(xs)
self.assertAllClose(ys1, jnp.cos(xs))
self.assertAllClose(ys2, jnp.sin(xs))
def test_nary(self):
@jax.custom_batching.custom_vmap
def f(x, y): return jnp.sin(x) + y ** 2.
@f.def_vmap
def rule(axis_size, in_batched, xs, ys):
self.assertEqual(in_batched, [True, True])
self.assertEqual(axis_size, 3)
self.assertEqual(axis_size, xs.shape[0])
self.assertEqual(axis_size, ys.shape[0])
return jnp.cos(xs) + ys ** 2., True
xs, ys = jnp.arange(3.0), jnp.arange(3.0)
zs = api.vmap(f)(xs, ys)
self.assertAllClose(zs, jnp.cos(xs) + ys ** 2.)
def test_nary_mixed_batching(self):
@jax.custom_batching.custom_vmap
def vector_dot(u, v):
self.assertEqual(u.ndim, 1)
self.assertEqual(v.ndim, 1)
return u @ v
size = 4
vlen = 3
in_batched_log = []
@vector_dot.def_vmap
def vector_dot_vmap_rule(axis_size, in_batched, u, v):
in_batched_log.append(in_batched)
self.assertEqual(axis_size, size)
u_batched, v_batched = in_batched
if u_batched:
self.assertEqual(u.ndim, 2)
self.assertEqual(u.shape[0], size)
else:
self.assertEqual(u.ndim, 1)
self.assertEqual(u.shape[0], vlen)
if v_batched:
self.assertEqual(v.ndim, 2)
self.assertEqual(v.shape[0], size)
else:
self.assertEqual(v.ndim, 1)
self.assertEqual(v.shape[0], vlen)
if u_batched and v_batched:
out = jnp.sum(u * v, axis=1)
else:
out = u @ v if u_batched else v @ u
return out, u_batched or v_batched
f = vector_dot
v = lambda *shape: jnp.ones(shape)
y = api.vmap(f, in_axes=(0, None))(v(4, 3), v(3))
self.assertAllClose(y, v(4, 3) @ v(3))
y = api.vmap(f, in_axes=(1, None))(v(3, 4), v(3))
self.assertAllClose(y, v(3, 4).T @ v(3))
y = api.vmap(f, in_axes=(None, 0))(v(3), v(4, 3))
self.assertAllClose(y, v(3) @ v(4, 3).T)
y = api.vmap(f, in_axes=(0, 0))(v(4, 3), v(4, 3))
self.assertAllClose(y, jnp.sum(v(4, 3) * v(4, 3), axis=1))
self.assertEqual(in_batched_log[0], [True, False])
self.assertEqual(in_batched_log[1], [True, False])
self.assertEqual(in_batched_log[2], [False, True])
self.assertEqual(in_batched_log[3], [True, True])
def test_rule_input_signature(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
rule_args = []
@f.def_vmap
def rule(axis_size, in_batched, xs):
rule_args.append((axis_size, in_batched))
return jnp.cos(xs), in_batched[0]
xs = jnp.arange(3)
_ = api.vmap(f)(xs)
(axis_size, in_batched), = rule_args
self.assertIs(type(axis_size), int)
self.assertIs(type(in_batched), list)
self.assertEqual(len(in_batched), 1)
def test_rule_output_vs_batching_output_mismatch(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def test_rule_abc(axis_size, in_batched, xs):
return [jnp.sin(xs), jnp.cos(xs)], in_batched
xs = jnp.arange(3)
self.assertRaisesRegex(
ValueError,
'structure of output value and output batching specification '
r'returned by custom vmap rule \(test_rule_abc\) do not match.*',
lambda: api.vmap(f)(xs))
def test_rule_vs_call_output_mismatch(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def test_rule_abc2(axis_size, in_batched, xs):
return [jnp.sin(xs)], in_batched
xs = jnp.arange(3)
self.assertRaisesRegex(
ValueError,
r'structure of output returned by custom vmap rule \(test_rule_abc2\) '
r'does not match that of original custom-vmapped function.*',
lambda: api.vmap(f)(xs))
def test_jvp_basic(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
x, tx = jnp.array(1.), jnp.array(2.)
xs, txs = jnp.arange(3.), jnp.arange(3.) * 2.
y, ty = f_jvp(x, tx)
self.assertAllClose(y, jnp.sin(x))
self.assertAllClose(ty, jnp.cos(x) * tx)
ys, tys = api.vmap(f_jvp)(xs, txs)
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * txs)
ys, tys = api.jvp(api.vmap(f), [xs], [txs])
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * txs)
@jax.numpy_dtype_promotion('standard')
def test_jvp_closure(self):
z = jnp.array([2., 1., 3.])
def bcast(x): return z + x - z
@jax.custom_batching.custom_vmap
def f(x): return z + jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True])
return z + jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
x, tx = jnp.array(1.), jnp.array(2.)
xs, txs = jnp.arange(3.), jnp.arange(3.) * 2.
y, ty = f_jvp(x, tx)
self.assertAllClose(y, z + jnp.sin(x))
self.assertAllClose(ty, bcast(jnp.cos(x)) * tx)
ys, tys = api.vmap(f_jvp)(xs, txs)
self.assertAllClose(ys, z + jnp.cos(xs))
self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs)
ys, tys = api.jvp(api.vmap(f), [xs], [txs])
self.assertAllClose(ys, z + jnp.cos(xs))
self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs)
def test_jvp_nary(self):
@jax.custom_batching.custom_vmap
def f(x, y): return jnp.sin(x) + y
@f.def_vmap
def rule(axis_size, in_batched, xs, ys):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [True, True])
return jnp.cos(xs) + ys, True
f_jvp = lambda x, y, tx, ty: api.jvp(f, [x, y], [tx, ty])
x, y, tx, ty = jnp.arange(4.)
xs, ys, txs, tys = 4. + jnp.arange(3. * 4).reshape((4, 3))
zs, tzs = api.vmap(f_jvp)(xs, ys, txs, tys)
self.assertAllClose(zs, jnp.cos(xs) + ys)
self.assertAllClose(tzs, -jnp.sin(xs) * txs + tys)
zs, tzs = api.jvp(api.vmap(f), [xs, ys], [txs, tys])
self.assertAllClose(zs, jnp.cos(xs) + ys)
self.assertAllClose(tzs, -jnp.sin(xs) * txs + tys)
def test_jvp_extra_batched_tangents(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
txs = 2. + jnp.arange(3.)
x = jnp.array(1, dtype=txs.dtype)
y, tys = api.vmap(f_jvp, in_axes=(None, 0), out_axes=(None, 0))(x, txs)
self.assertAllClose(y, jnp.cos(x))
self.assertAllClose(tys, -jnp.sin(x) * txs)
def test_jacfwd(self):
# jacfwd is another way to exercise extra-batched tangents
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
x = jnp.arange(3.) + .72
j = api.jacfwd(f)(x)
self.assertAllClose(j, -jnp.diag(jnp.sin(x)))
def test_jvp_extra_batched_primals(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(axis_size, 3)
self.assertEqual(in_batched, [False])
return jnp.cos(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
xs = jnp.arange(3.)
tx = jnp.array(4, dtype=xs.dtype)
ys, tys = api.vmap(f_jvp, in_axes=(0, None))(xs, tx)
self.assertAllClose(ys, jnp.cos(xs))
self.assertAllClose(tys, -jnp.sin(xs) * tx)
def test_jvp_extra_batched_primals_with_linear_vmap_rule(self):
# When a function is linear, its Jacobian is constant. JAX's JVP
# of linear functions takes advantage of this: when mapping over a
# batch of primals relative to a fixed (i.e. symbolically
# replicated) tangent, output tangents remain replicated as well
# (i.e. JAX will not broadcast them). This is true in general, and
# this test checks that vmapped JVPs continue to behave this way
# when custom_vmap is involved and the custom vmap rule is linear.
@jax.custom_batching.custom_vmap
def f_linear(x): return 7. * x
@f_linear.def_vmap
def linear_rule(axis_size, in_batched, xs):
return 11. * xs, in_batched[0]
@jax.custom_batching.custom_vmap
def f_nonlinear(x): return jnp.sin(x)
@f_nonlinear.def_vmap
def nonlinear_rule(axis_size, in_batched, xs):
return jnp.cos(xs), in_batched[0]
f_lin_jvp = lambda x, tx: api.jvp(f_linear, [x], [tx])
f_non_jvp = lambda x, tx: api.jvp(f_nonlinear, [x], [tx])
xs = jnp.arange(3.)
tx = jnp.array(4., dtype=xs.dtype)
# doesn't err
_ = api.vmap(f_lin_jvp, in_axes=(0, None), out_axes=(0, None))(xs, tx)
# does err
self.assertRaisesRegex(
ValueError, "at vmap out_axes",
lambda: api.vmap(
f_non_jvp, in_axes=(0, None), out_axes=(0, None))(xs, tx))
def test_jvp_dataflow_violation(self):
# The jvp-of-custom-vmap machinery should not assume the standard
# dataflow constraint on the JVP of the custom vmap rule (primal
# outputs independent of tangent inputs). Both jvp and vmap are
# "forward" transformations under which, at present, we don't
# enforce the JVP dependence diagram. Because output primals can
# depend on input tangents, extra-batched input tangents can
# create batched output primals, as this test checks.
@jax.custom_jvp
def cos_with_invalid_dataflow_jvp(x): return jnp.cos(x)
@cos_with_invalid_dataflow_jvp.defjvp
def invalid_dataflow_jvp(x, tx):
[x], [tx] = x, tx
return jnp.cos(x * tx), tx
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
return cos_with_invalid_dataflow_jvp(xs), in_batched[0]
f_jvp = lambda x, tx: api.jvp(f, [x], [tx])
txs = 2. + jnp.arange(3.)
x = jnp.array(1, dtype=txs.dtype)
# doesn't err
ys, tys = api.vmap(f_jvp, in_axes=(None, 0))(x, txs)
self.assertAllClose(ys, jnp.cos(x * txs))
self.assertAllClose(tys, txs)
# does err
self.assertRaisesRegex(
ValueError, "at vmap out_axes",
lambda: api.vmap(
f_jvp, in_axes=(None, 0), out_axes=(None, 0))(x, txs))
def test_tree(self):
tree_sin = partial(jax.tree.map, jnp.sin)
tree_cos = partial(jax.tree.map, jnp.cos)
x, xs = jnp.array(1.), jnp.arange(3)
x = (x, [x + 1, x + 2], [x + 3], x + 4)
xs = (xs, [xs + 1, xs + 2], [xs + 3], xs + 4)
in_batched_ref = jax.tree.map(lambda _: True, x)
@jax.custom_batching.custom_vmap
def f(xs): return tree_sin(xs)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [in_batched_ref])
sz, = {z.shape[0] for z in jax.tree.leaves(xs)}
self.assertEqual(axis_size, sz)
return tree_cos(xs), in_batched[0]
y = f(x)
self.assertAllClose(y, tree_sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, tree_cos(xs))
def test_tree_with_nones(self):
tree_sin = partial(jax.tree.map, jnp.sin)
tree_cos = partial(jax.tree.map, jnp.cos)
x, xs = jnp.array(1.), jnp.arange(3)
x = (x, [x + 1, None], [x + 3], None)
xs = (xs, [xs + 1, None], [xs + 3], None)
in_batched_ref = jax.tree.map(lambda _: True, x)
@jax.custom_batching.custom_vmap
def f(xs): return tree_sin(xs)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [in_batched_ref])
sz, = {z.shape[0] for z in jax.tree.leaves(xs)}
self.assertEqual(axis_size, sz)
return tree_cos(xs), in_batched[0]
y = f(x)
self.assertAllClose(y, tree_sin(x))
ys = api.vmap(f)(xs)
self.assertAllClose(ys, tree_cos(xs))
def test_jit(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
self.assertEqual(in_batched, [True])
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), in_batched[0]
x, xs = jnp.array(1.), jnp.arange(3)
self.assertAllClose(f(x), jit(f)(x))
self.assertAllClose(jit(api.vmap(f))(xs), api.vmap(f)(xs))
self.assertAllClose(api.vmap(jit(f))(xs), api.vmap(f)(xs))
def test_sequential_vmap_basic(self):
@jax.custom_batching.sequential_vmap
def f(x):
return x + 1.
def vmap_ref(xs):
return lax.map(f, xs)
xs = jnp.arange(3.)
jaxpr = api.make_jaxpr(api.vmap(f))(xs)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
def test_sequential_vmap_nary_same_batching(self):
@jax.custom_batching.sequential_vmap
def f(x, y):
return x + y
def vmap_ref(xs, ys):
return lax.map(lambda args: f(*args), (xs, ys))
xs, ys = jnp.arange(3.), 4. + jnp.arange(3.)
jaxpr = api.make_jaxpr(api.vmap(f))(xs, ys)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs, ys)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
def test_sequential_vmap_nary_mixed_batching(self):
@jax.custom_batching.sequential_vmap
def f(x, y):
return x + y
def vmap_ref(xs, y):
return lax.map(lambda x: f(x, y), xs)
xs, y = jnp.arange(3.), 4.
jaxpr = api.make_jaxpr(api.vmap(f, in_axes=(0, None)))(xs, y)
jaxpr_ref = api.make_jaxpr(vmap_ref)(xs, y)
self.assertEqual(str(jaxpr), str(jaxpr_ref))
@parameterized.named_parameters(
("1", 1),
("8", 4),
("12", 8),
("16", 16),
)
def test_batch_map_basic(self, batch_size: int):
def f(x):
self.assertEqual(x.shape, ())
return x**2
x = np.arange(16)
y = jax.lax.map(f, x, batch_size=batch_size)
np.testing.assert_array_equal(y, x**2)
@parameterized.named_parameters(
("1", 1),
("8", 4),
("12", 8),
("16", 16),
)
def test_batch_map_pytrees(self, batch_size: int):
f = lambda x: {'b': x['a'] ** 2}
inputs = {'a': np.arange(16)}
expected = np.arange(16) ** 2
outputs = jax.lax.map(f, inputs, batch_size=batch_size)
self.assertAllClose(outputs['b'], expected)
outputs = jax.lax.map(
f, inputs, batch_size=batch_size
)
self.assertAllClose(outputs['b'], expected)
def test_batch_divides_axis(self):
def f(t):
x, a = t
self.assertEqual(x.shape, (4,))
return (x + a)**2
x = jax.random.randint(jax.random.key(0), (16, 4), -10, 10)
a = jax.random.randint(jax.random.key(1), (16, 4), -10, 10)
@jax.jit
def g(x, a):
return jax.lax.map(f, (x, a), batch_size=8)
y = g(x, a)
self.assertAllClose(y, (x + a)**2)
def test_undefined_rule(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
with self.assertRaisesRegex(
AttributeError, "No batching rule defined for custom_vmap function f"):
f(0.5)
def test_kwargs(self):
@jax.custom_batching.custom_vmap
def f(x): return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
xs_batched, = in_batched
self.assertEqual(xs_batched, True)
self.assertEqual(axis_size, xs.shape[0])
return jnp.cos(xs), xs_batched
x, xs = jnp.array(1.), jnp.arange(3)
y = f(x=x)
self.assertAllClose(y, jnp.sin(x))
ys = api.vmap(f)(x=xs)
self.assertAllClose(ys, jnp.cos(xs))
def test_partial_eval_raises(self):
@jax.custom_batching.custom_vmap
def f(x):
return jnp.sin(x)
@f.def_vmap
def rule(axis_size, in_batched, xs):
del axis_size # unused
return jnp.cos(xs), in_batched[0]
with self.assertRaisesRegex(
ValueError,
"Linearization failed to produce known values for all output primals",
):
jax.grad(f)(0.5)
def test_compose_custom_vjp(self):
@jax.custom_vjp
@jax.custom_batching.custom_vmap
def f(x, y):
return jnp.sin(x) * y
@f.def_vmap
def f_vmap_rule(axis_size, in_batched, xs, ys):
return jnp.cos(xs) * ys, True
def f_fwd(x, y):
return f(x, y), (jnp.cos(x), jnp.sin(x), y)
def f_bwd(res, g):
cos_x, sin_x, y = res
return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd)
xs = jnp.linspace(0, 1, 5)
ys = jnp.linspace(-0.1, 0.1, 5)
self.assertAllClose(jax.vmap(f)(xs, ys), jnp.cos(xs) * ys)
jax.grad(f)(xs[0], ys[0]) # Doesn't crash.
def test_compose_custom_vjp_bwd_rule(self):
# This tests the case where both the forward and backward rules are wrapped
# in custom_vmap.
@jax.custom_batching.sequential_vmap
def fun_fwd(x, y):
return jnp.sin(x) * y, (x, y)
@jax.custom_batching.sequential_vmap
def fun_bwd(res, ct):
x, y = res
return x * ct, y * ct
fun = jax.custom_vjp(lambda *args: fun_fwd(*args)[0])
fun.defvjp(fun_fwd, fun_bwd)
xs = jnp.linspace(0, 1, 5)
y = jnp.array(0.5, dtype=xs.dtype)
f = jax.vmap(jax.jit(fun), in_axes=(0, None))
out, f_vjp = jax.vjp(f, xs, y)
f_vjp(out) # Doesn't crash.
def test_resolve_kwargs_error_message(self):
@jax.custom_batching.custom_vmap
def f(x, y, *, z=None):
return jnp.sin(x) * y
@f.def_vmap
def f_vmap_rule(axis_size, in_batched, xs, ys):
self.fail("should not be executed")
with self.assertRaisesRegex(
TypeError,
r"The input arguments to the custom_vmap-decorated function f(.*)\n"
r"missing a required argument: 'y'"
):
f(0.5)
with self.assertRaisesRegex(
TypeError,
r"The input arguments to the custom_vmap-decorated function f(.*)\n"
"The following keyword arguments could not be resolved to positions: z"
):
f(0.5, 0.1, z=1.0)
| CustomVmapTest |
python | getsentry__sentry | src/sentry/incidents/endpoints/bases.py | {
"start": 1447,
"end": 2340
} | class ____(OrganizationEndpoint):
permission_classes = (OrganizationAlertRulePermission,)
def convert_args(
self, request: Request, alert_rule_id: int, *args: Any, **kwargs: Any
) -> tuple[tuple[Any, ...], dict[str, Any]]:
args, kwargs = super().convert_args(request, *args, **kwargs)
organization = kwargs["organization"]
# Allow orgs that have downgraded plans to delete metric alerts
if request.method != "DELETE" and not features.has(
"organizations:incidents", organization, actor=request.user
):
raise ResourceDoesNotExist
try:
kwargs["alert_rule"] = AlertRule.objects.get(
organization=organization, id=alert_rule_id
)
except AlertRule.DoesNotExist:
raise ResourceDoesNotExist
return args, kwargs
| OrganizationAlertRuleEndpoint |
python | getsentry__sentry | src/sentry/testutils/silo.py | {
"start": 3604,
"end": 4043
} | class ____(Exception):
pass
def _get_test_name_suffix(silo_mode: SiloMode) -> str:
name = silo_mode.name[0].upper() + silo_mode.name[1:].lower()
return f"__In{name}Mode"
def strip_silo_mode_test_suffix(name: str) -> str:
for silo_mode in SiloMode:
suffix = _get_test_name_suffix(silo_mode)
if name.endswith(suffix):
return name[: -len(suffix)]
return name
| SubclassNotSiloDecoratedException |
python | huggingface__transformers | src/transformers/models/mllama/modeling_mllama.py | {
"start": 34175,
"end": 43201
} | class ____(PreTrainedModel):
config: MllamaConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = [
"MllamaVisionEncoderLayer",
"MllamaCrossAttentionDecoderLayer",
"MllamaSelfAttentionDecoderLayer",
]
_can_compile_fullgraph = False # static cache cannot have different shapes for each layer
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": [MllamaSelfAttentionDecoderLayer, MllamaCrossAttentionDecoderLayer],
"attentions": [
OutputRecorder(MllamaTextSelfAttention, index=1, layer_name="self_attn"),
OutputRecorder(MllamaTextSelfAttention, index=1, layer_name="cross_attn"),
OutputRecorder(MllamaTextCrossAttention, index=1, layer_name="cross_attn"),
],
}
@torch.no_grad()
def _init_weights(self, module):
std = getattr(self.config, "initializer_range", self.config.get_text_config().initializer_range)
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, mean=0.0, std=std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
elif isinstance(module, nn.LayerNorm):
init.ones_(module.weight)
init.zeros_(module.bias)
elif isinstance(module, MllamaTextRMSNorm):
init.ones_(module.weight)
elif isinstance(module, MllamaVisionModel):
init.normal_(module.class_embedding, std=std)
elif isinstance(module, MllamaPrecomputedPositionEmbedding):
init.normal_(module.embedding, std=std)
init.zeros_(module.gate)
elif isinstance(module, MllamaVisionEncoderLayer) and module.is_gated:
init.normal_(module.gate_attn, std=std)
init.normal_(module.gate_ffn, std=std)
elif isinstance(module, MllamaCrossAttentionDecoderLayer):
init.zeros_(module.cross_attn_attn_gate)
init.zeros_(module.cross_attn_mlp_gate)
elif isinstance(module, MllamaPrecomputedAspectRatioEmbedding):
if module.is_gated:
init.zeros_(module.gate)
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
@auto_docstring(
custom_intro="""
The Mllama Vision Model which consists of two vision encoders.
"""
)
| MllamaPreTrainedModel |
python | django__django | tests/test_client_regress/tests.py | {
"start": 29309,
"end": 29899
} | class ____(TestDataMixin, TestCase):
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Try to access a login protected page.
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
@override_settings(
ROOT_URLCONF="test_client_regress.urls",
)
| SessionEngineTests |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/agent.py | {
"start": 1919,
"end": 6871
} | class ____(BaseModel):
"""Base Single Action Agent class."""
@property
def return_values(self) -> list[str]:
"""Return values of the agent."""
return ["output"]
def get_allowed_tools(self) -> list[str] | None:
"""Get allowed tools."""
return None
@abstractmethod
def plan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentAction | AgentFinish:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
@abstractmethod
async def aplan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentAction | AgentFinish:
"""Async given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
@property
@abstractmethod
def input_keys(self) -> list[str]:
"""Return the input keys."""
def return_stopped_response(
self,
early_stopping_method: str,
intermediate_steps: list[tuple[AgentAction, str]], # noqa: ARG002
**_: Any,
) -> AgentFinish:
"""Return response when agent has been stopped due to max iterations.
Args:
early_stopping_method: Method to use for early stopping.
intermediate_steps: Steps the LLM has taken to date,
along with observations.
Returns:
Agent finish object.
Raises:
ValueError: If `early_stopping_method` is not supported.
"""
if early_stopping_method == "force":
# `force` just returns a constant string
return AgentFinish(
{"output": "Agent stopped due to iteration limit or time limit."},
"",
)
msg = f"Got unsupported early_stopping_method `{early_stopping_method}`"
raise ValueError(msg)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: BaseCallbackManager | None = None,
**kwargs: Any,
) -> BaseSingleActionAgent:
"""Construct an agent from an LLM and tools.
Args:
llm: Language model to use.
tools: Tools to use.
callback_manager: Callback manager to use.
kwargs: Additional arguments.
Returns:
Agent object.
"""
raise NotImplementedError
@property
def _agent_type(self) -> str:
"""Return Identifier of an agent type."""
raise NotImplementedError
@override
def dict(self, **kwargs: Any) -> builtins.dict:
"""Return dictionary representation of agent.
Returns:
Dictionary representation of agent.
"""
_dict = super().model_dump()
try:
_type = self._agent_type
except NotImplementedError:
_type = None
if isinstance(_type, AgentType):
_dict["_type"] = str(_type.value)
elif _type is not None:
_dict["_type"] = _type
return _dict
def save(self, file_path: Path | str) -> None:
"""Save the agent.
Args:
file_path: Path to file to save the agent to.
Example:
```python
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
```
"""
# Convert file to Path object.
save_path = Path(file_path) if isinstance(file_path, str) else file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
agent_dict = self.dict()
if "_type" not in agent_dict:
msg = f"Agent {self} does not support saving"
raise NotImplementedError(msg)
if save_path.suffix == ".json":
with save_path.open("w") as f:
json.dump(agent_dict, f, indent=4)
elif save_path.suffix.endswith((".yaml", ".yml")):
with save_path.open("w") as f:
yaml.dump(agent_dict, f, default_flow_style=False)
else:
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
def tool_run_logging_kwargs(self) -> builtins.dict:
"""Return logging kwargs for tool run."""
return {}
| BaseSingleActionAgent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 305020,
"end": 307436
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of StartRepositoryMigration"""
__schema__ = github_schema
__field_names__ = (
"source_id",
"owner_id",
"source_repository_url",
"repository_name",
"continue_on_error",
"git_archive_url",
"metadata_archive_url",
"access_token",
"github_pat",
"skip_releases",
"target_repo_visibility",
"lock_source",
"client_mutation_id",
)
source_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="sourceId")
"""The ID of the migration source."""
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
"""The ID of the organization that will own the imported repository."""
source_repository_url = sgqlc.types.Field(URI, graphql_name="sourceRepositoryUrl")
"""The URL of the source repository."""
repository_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="repositoryName")
"""The name of the imported repository."""
continue_on_error = sgqlc.types.Field(Boolean, graphql_name="continueOnError")
"""Whether to continue the migration on error. Defaults to `false`."""
git_archive_url = sgqlc.types.Field(String, graphql_name="gitArchiveUrl")
"""The signed URL to access the user-uploaded git archive."""
metadata_archive_url = sgqlc.types.Field(String, graphql_name="metadataArchiveUrl")
"""The signed URL to access the user-uploaded metadata archive."""
access_token = sgqlc.types.Field(String, graphql_name="accessToken")
"""The migration source access token."""
github_pat = sgqlc.types.Field(String, graphql_name="githubPat")
"""The GitHub personal access token of the user importing to the
target repository.
"""
skip_releases = sgqlc.types.Field(Boolean, graphql_name="skipReleases")
"""Whether to skip migrating releases for the repository."""
target_repo_visibility = sgqlc.types.Field(String, graphql_name="targetRepoVisibility")
"""The visibility of the imported repository."""
lock_source = sgqlc.types.Field(Boolean, graphql_name="lockSource")
"""Whether to lock the source repository."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| StartRepositoryMigrationInput |
python | django__django | tests/postgres_tests/models.py | {
"start": 2546,
"end": 2645
} | class ____(PostgreSQLModel):
array_of_enums = ArrayField(EnumField(max_length=20))
| ArrayEnumModel |
python | sphinx-doc__sphinx | tests/roots/test-ext-autosummary-ext/underscore_module_.py | {
"start": 52,
"end": 205
} | class ____:
"""Class"""
def method_(_arg): # NoQA: N805
"""Method"""
pass
def function_(_arg):
"""Function"""
pass
| class_ |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/loader.py | {
"start": 2412,
"end": 3162
} | class ____(
Reader,
RoundTripScanner,
RoundTripParser,
Composer,
RoundTripConstructor,
VersionedResolver,
):
def __init__(self, stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
# self.reader = Reader.__init__(self, stream)
self.comment_handling = None # issue 385
Reader.__init__(self, stream, loader=self)
RoundTripScanner.__init__(self, loader=self)
RoundTripParser.__init__(self, loader=self)
Composer.__init__(self, loader=self)
RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
VersionedResolver.__init__(self, version, loader=self)
| RoundTripLoader |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 22371,
"end": 25171
} | class ____(_FieldWrapper):
"""Represents a signature field.
Signatures are not truly supported.
"""
def stamp_overlay(
self,
overlay: Object | Page,
*,
expand_rect: int
| float
| Decimal
| Sequence[int | float | Decimal]
| None = None,
) -> Name:
"""Stamp an image over the top of a signature field.
This is *not* true support for PDF signatures. Rather, it is merely a utility
for adding an image to the PDF at the location of a signature field.
This uses `pikepdf.Page.add_overlay` under the hood, see that method for
additional usage information.
If the bounding box of the signature field is smaller than the "visual"
signature area in the PDF, you may use the ``expand_rect`` parameter to increase
the dimensions of the rectangle when stamping. This may be any of the
following types:
* A number, which will be added equally to all sides of the box
* A sequence of two numbers, which will be added on the X and Y axis,
respectively
* A sequence of four numbers, which will be added to the left, bottom, right,
and top sides respectively
Positive numbers will increase the size of the box, and negative numbers will
decease it.
"""
# There is allowed to be only one annot per sig field, see 12.7.5.5
field_annot = self._form._acroform.get_annotations_for_field(self._field)[0]
if Name.P in field_annot.obj:
# The annot keeps a reference to the page (not always the case)
Page(field_annot.obj.P).add_overlay(
overlay, self._expand_rect(field_annot.rect, expand_rect)
)
for page in self._form._pdf.pages:
# Fall back to looping through all possible pages.
for annot in self._form._acroform.get_widget_annotations_for_page(page):
if annot == field_annot:
return page.add_overlay(
overlay, self._expand_rect(annot.rect, expand_rect)
)
raise ValueError("Could not find annotation for signature field")
def _expand_rect(self, rect: Rectangle, expand_by: int | float | Decimal | None):
if expand_by is None:
return rect
if isinstance(expand_by, int | float | Decimal):
expand_by = (expand_by, expand_by, expand_by, expand_by)
if len(expand_by) == 2:
expand_by = (*expand_by, *expand_by)
return Rectangle(
rect.llx - float(expand_by[0]),
rect.lly - float(expand_by[1]),
rect.urx + float(expand_by[2]),
rect.ury + float(expand_by[3]),
)
| SignatureField |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/asset_utils.py | {
"start": 396,
"end": 6370
} | class ____(
namedtuple("_ParsedTableauAssetSpecs", ["external_asset_specs", "materializable_asset_specs"])
):
"""Used to represent the parsed Tableau asset specs
as returned by the `parse_tableau_external_and_materializable_asset_specs` function below.
"""
def __new__(cls, external_asset_specs, materializable_asset_specs):
return super().__new__(
cls,
external_asset_specs=check.list_param(
external_asset_specs, "external_asset_specs", AssetSpec
),
materializable_asset_specs=check.list_param(
materializable_asset_specs, "materializable_asset_specs", AssetSpec
),
)
def parse_tableau_external_and_materializable_asset_specs(
specs: Sequence[AssetSpec],
include_data_sources_with_extracts: bool = False,
) -> ParsedTableauAssetSpecs:
"""Parses a list of Tableau AssetSpecs provided as input and return two lists of AssetSpecs,
one for the Tableau external assets and another one for the Tableau materializable assets.
In Tableau, data sources are considered external assets,
while sheets and dashboards are considered materializable assets.
Args:
specs (Sequence[AssetSpec]): The asset specs of the assets in the Tableau workspace.
include_data_sources_with_extracts (bool):
Whether to include published data sources with extracts in materializable assets.
Returns:
ParsedTableauAssetSpecs: A named tuple representing the parsed Tableau asset specs
as `external_asset_specs` and `materializable_asset_specs`.
"""
data_source_asset_specs = [
spec for spec in specs if TableauTagSet.extract(spec.tags).asset_type == "data_source"
]
materializable_data_source_asset_specs, non_materializable_data_source_asset_specs = [], []
for spec in data_source_asset_specs:
# Embedded data sources with extract can't be refreshed using the "Update Data Source Now" endpoint
# https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_data_sources.htm#update_data_source_now
if (
TableauDataSourceMetadataSet.extract(spec.metadata).has_extracts
and TableauDataSourceMetadataSet.extract(spec.metadata).is_published
):
materializable_data_source_asset_specs.append(spec)
else:
non_materializable_data_source_asset_specs.append(spec)
view_asset_specs = [
spec
for spec in specs
if TableauTagSet.extract(spec.tags).asset_type in ["dashboard", "sheet"]
]
external_asset_specs = (
non_materializable_data_source_asset_specs
if include_data_sources_with_extracts
else data_source_asset_specs
)
materializable_asset_specs = (
materializable_data_source_asset_specs + view_asset_specs
if include_data_sources_with_extracts
else view_asset_specs
)
return ParsedTableauAssetSpecs(
external_asset_specs=external_asset_specs,
materializable_asset_specs=materializable_asset_specs,
)
def create_view_asset_event(
view: TSC.ViewItem, spec: AssetSpec, refreshed_workbook_ids: Set[str]
) -> Iterator[Union[AssetObservation, Output]]:
asset_key = spec.key
workbook_id = TableauViewMetadataSet.extract(spec.metadata).workbook_id
if workbook_id and workbook_id in refreshed_workbook_ids:
yield from create_asset_output(
asset_key=asset_key, data=view, additional_metadata={"workbook_id": view.workbook_id}
)
else:
yield from create_asset_observation(
asset_key=asset_key, data=view, additional_metadata={"workbook_id": view.workbook_id}
)
def create_data_source_asset_event(
data_source: TSC.DatasourceItem, spec: AssetSpec, refreshed_data_source_ids: Set[str]
) -> Iterator[Union[AssetObservation, Output]]:
asset_key = spec.key
data_source_id = TableauDataSourceMetadataSet.extract(spec.metadata).id
if data_source_id and data_source_id in refreshed_data_source_ids:
yield from create_asset_output(
asset_key=asset_key, data=data_source, additional_metadata={"id": data_source.id}
)
else:
yield from create_asset_observation(
asset_key=asset_key, data=data_source, additional_metadata={"id": data_source.id}
)
def create_view_asset_observation(
view: TSC.ViewItem,
spec: AssetSpec,
) -> Iterator[AssetObservation]:
asset_key = spec.key
yield from create_asset_observation(
asset_key=asset_key, data=view, additional_metadata={"workbook_id": view.workbook_id}
)
def create_asset_output(
asset_key: AssetKey,
data: Union[TSC.DatasourceItem, TSC.ViewItem],
additional_metadata: Mapping[str, Any],
) -> Iterator[Output]:
yield Output(
value=None,
output_name="__".join(asset_key.path),
metadata={
**additional_metadata,
"owner_id": data.owner_id,
"name": data.name,
"contentUrl": data.content_url,
"createdAt": data.created_at.strftime("%Y-%m-%dT%H:%M:%S") if data.created_at else None,
"updatedAt": data.updated_at.strftime("%Y-%m-%dT%H:%M:%S") if data.updated_at else None,
},
)
def create_asset_observation(
asset_key: AssetKey,
data: Union[TSC.DatasourceItem, TSC.ViewItem],
additional_metadata: Mapping[str, Any],
) -> Iterator[AssetObservation]:
yield AssetObservation(
asset_key=asset_key,
metadata={
**additional_metadata,
"owner_id": data.owner_id,
"name": data.name,
"contentUrl": data.content_url,
"createdAt": data.created_at.strftime("%Y-%m-%dT%H:%M:%S") if data.created_at else None,
"updatedAt": data.updated_at.strftime("%Y-%m-%dT%H:%M:%S") if data.updated_at else None,
},
)
| ParsedTableauAssetSpecs |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/compound_one_liners.py | {
"start": 994,
"end": 4395
} | class ____(Generic[T]): pass # fmt: skip
# Try/except blocks
try: risky_operation() # fmt: skip
except ValueError: handle_error() # fmt: skip
except: handle_any_error() # fmt: skip
else: success_case() # fmt: skip
finally: cleanup() # fmt: skip
# Match statements (Python 3.10+)
match value:
case 1: print("one") # fmt: skip
case _: print("other") # fmt: skip
# With statements
with open("file.txt") as f: content = f.read() # fmt: skip
with context_manager() as cm: result = cm.process() # fmt: skip
# Async variants
async def async_func(): return await some_call() # fmt: skip
async for item in async_iterator(): await process(item) # fmt: skip
async with async_context() as ctx: await ctx.work() # fmt: skip
# Complex expressions that would normally format
def complex_expr(): return [x for x in range(100) if x % 2 == 0 and x > 50] # fmt: skip
if condition_a and condition_b or (condition_c and not condition_d): execute_complex_logic() # fmt: skip
# Edge case: comment positioning
def func_with_comment(): # some comment
return "value" # fmt: skip
# Edge case: multiple fmt: skip (only last one should matter)
def multiple_skip(): return "test" # fmt: skip # fmt: skip
# Should NOT be affected (already multiline)
def multiline_func():
return "this should format normally"
if long_condition_that_spans \
and continues_on_next_line:
print("multiline condition")
# Mix of skipped and non-skipped
for i in range(10): print(f"item {i}") # fmt: skip
for j in range(5):
print(f"formatted item {j}")
# With trailing comma that would normally be removed
def trailing_comma_func(a, b, c,): return a + b + c # fmt: skip
# Dictionary/list comprehensions
def dict_comp(): return {k: v for k, v in items.items() if v is not None} # fmt: skip
def list_comp(): return [x * 2 for x in numbers if x > threshold_value] # fmt: skip
# Lambda in one-liner
def with_lambda(): return lambda x, y, z: x + y + z if all([x, y, z]) else None # fmt: skip
# String formatting that would normally be reformatted
def format_string(): return f"Hello {name}, you have {count} items in your cart totaling ${total:.2f}" # fmt: skip
# loop else clauses
for i in range(2): print(i) # fmt: skip
else: print("this") # fmt: skip
while foo(): print(i) # fmt: skip
else: print("this") # fmt: skip
# again but only the first skip
for i in range(2): print(i) # fmt: skip
else: print("this")
while foo(): print(i) # fmt: skip
else: print("this")
# again but only the second skip
for i in range(2): print(i)
else: print("this") # fmt: skip
while foo(): print(i)
else: print("this") # fmt: skip
# multiple statements in body
if True: print("this"); print("that") # fmt: skip
# Examples with more comments
try: risky_operation() # fmt: skip
# leading 1
except ValueError: handle_error() # fmt: skip
# leading 2
except: handle_any_error() # fmt: skip
# leading 3
else: success_case() # fmt: skip
# leading 4
finally: cleanup() # fmt: skip
# trailing
# multi-line before colon (should remain as is)
if (
long_condition
): a + b # fmt: skip
# over-indented comment example
# See https://github.com/astral-sh/ruff/pull/20633#issuecomment-3453288910
# and https://github.com/astral-sh/ruff/pull/21185
for x in it: foo()
# comment
else: bar() # fmt: skip
if this(
'is a long',
# commented
'condition'
): with_a_skip # fmt: skip
| GenericClass |
python | crytic__slither | slither/solc_parsing/yul/parse_yul.py | {
"start": 4505,
"end": 6529
} | class ____(metaclass=abc.ABCMeta):
__slots__ = [
"_contract",
"_id",
"_yul_local_variables",
"_yul_local_functions",
"_parent_func",
]
def __init__(
self, contract: Optional[Contract], yul_id: List[str], parent_func: Function
) -> None:
self._contract = contract
self._id: List[str] = yul_id
self._yul_local_variables: List[YulLocalVariable] = []
self._yul_local_functions: List[YulFunction] = []
self._parent_func: Function = parent_func
@property
def id(self) -> List[str]:
return self._id
@property
def contract(self) -> Optional[Contract]:
return self._contract
@property
def compilation_unit(self) -> SlitherCompilationUnit:
return self._parent_func.compilation_unit
@property
def parent_func(self) -> Optional[Function]:
return self._parent_func
@property
@abc.abstractmethod
def function(self) -> Function:
pass
@abc.abstractmethod
def new_node(self, node_type: NodeType, src: Union[str, Dict]) -> YulNode:
pass
@property
def file_scope(self) -> FileScope:
return self._parent_func.file_scope
def add_yul_local_variable(self, var: "YulLocalVariable") -> None:
self._yul_local_variables.append(var)
def get_yul_local_variable_from_name(self, variable_name: str) -> Optional["YulLocalVariable"]:
return next(
(
v
for v in self._yul_local_variables
if v.underlying.name == _name_to_yul_name(variable_name, self.id)
),
None,
)
def add_yul_local_function(self, func: "YulFunction") -> None:
self._yul_local_functions.append(func)
def get_yul_local_function_from_name(self, func_name: str) -> Optional["YulFunction"]:
return next(
(v for v in self._yul_local_functions if v.underlying.name == func_name),
None,
)
| YulScope |
python | doocs__leetcode | solution/3300-3399/3363.Find the Maximum Number of Fruits Collected/Solution.py | {
"start": 0,
"end": 810
} | class ____:
def maxCollectedFruits(self, fruits: List[List[int]]) -> int:
n = len(fruits)
f = [[-inf] * n for _ in range(n)]
f[0][n - 1] = fruits[0][n - 1]
for i in range(1, n):
for j in range(i + 1, n):
f[i][j] = max(f[i - 1][j], f[i - 1][j - 1]) + fruits[i][j]
if j + 1 < n:
f[i][j] = max(f[i][j], f[i - 1][j + 1] + fruits[i][j])
f[n - 1][0] = fruits[n - 1][0]
for j in range(1, n):
for i in range(j + 1, n):
f[i][j] = max(f[i][j - 1], f[i - 1][j - 1]) + fruits[i][j]
if i + 1 < n:
f[i][j] = max(f[i][j], f[i + 1][j - 1] + fruits[i][j])
return sum(fruits[i][i] for i in range(n)) + f[n - 2][n - 1] + f[n - 1][n - 2]
| Solution |
python | getsentry__sentry | src/sentry/api/serializers/models/event.py | {
"start": 19912,
"end": 21034
} | class ____(EventSerializer):
def get_attrs(self, item_list, user, **kwargs):
return super().get_attrs(item_list, user, is_public=True, **kwargs)
def serialize(self, obj, attrs, user, **kwargs):
base = super().serialize(obj, attrs, user)
result: dict[str, Any] = {
k: v
for k, v in base.items()
if k not in {"context", "contexts", "user", "tags", "sdk", "errors"}
}
result["entries"] = [e for e in result["entries"] if e["type"] != "breadcrumbs"]
return result
SimpleEventSerializerResponse = TypedDict(
"SimpleEventSerializerResponse",
{
"id": str,
"event.type": str,
"groupID": str | None,
"eventID": str,
"projectID": str,
"message": str,
"title": str,
"location": str | None,
"culprit": str | None,
"user": EventUserApiContext | None,
"tags": list[EventTag],
"platform": str | None,
"dateCreated": datetime,
"crashFile": str | None,
"metadata": dict[str, Any] | None,
},
)
| SharedEventSerializer |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_resource_slice.py | {
"start": 383,
"end": 6872
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta2ResourceSliceSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1beta2ResourceSlice - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1beta2ResourceSlice. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta2ResourceSlice. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta2ResourceSlice.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta2ResourceSlice. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta2ResourceSlice. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta2ResourceSlice. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta2ResourceSlice.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta2ResourceSlice. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta2ResourceSlice. # noqa: E501
:return: The metadata of this V1beta2ResourceSlice. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta2ResourceSlice.
:param metadata: The metadata of this V1beta2ResourceSlice. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta2ResourceSlice. # noqa: E501
:return: The spec of this V1beta2ResourceSlice. # noqa: E501
:rtype: V1beta2ResourceSliceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta2ResourceSlice.
:param spec: The spec of this V1beta2ResourceSlice. # noqa: E501
:type: V1beta2ResourceSliceSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ResourceSlice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2ResourceSlice):
return True
return self.to_dict() != other.to_dict()
| V1beta2ResourceSlice |
python | fabric__fabric | fabric/runners.py | {
"start": 6284,
"end": 6392
} | class ____(Remote):
def send_start_message(self, command):
self.channel.invoke_shell()
| RemoteShell |
python | tiangolo__fastapi | docs_src/request_form_models/tutorial002_an.py | {
"start": 124,
"end": 317
} | class ____(BaseModel):
username: str
password: str
model_config = {"extra": "forbid"}
@app.post("/login/")
async def login(data: Annotated[FormData, Form()]):
return data
| FormData |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/unique_argument_names.py | {
"start": 69,
"end": 1024
} | class ____(ValidationRule):
__slots__ = 'known_arg_names',
def __init__(self, context):
super(UniqueArgumentNames, self).__init__(context)
self.known_arg_names = {}
def enter_Field(self, node, key, parent, path, ancestors):
self.known_arg_names = {}
def enter_Directive(self, node, key, parent, path, ancestors):
self.known_arg_names = {}
def enter_Argument(self, node, key, parent, path, ancestors):
arg_name = node.name.value
if arg_name in self.known_arg_names:
self.context.report_error(GraphQLError(
self.duplicate_arg_message(arg_name),
[self.known_arg_names[arg_name], node.name]
))
else:
self.known_arg_names[arg_name] = node.name
return False
@staticmethod
def duplicate_arg_message(field):
return 'There can only be one argument named "{}".'.format(field)
| UniqueArgumentNames |
python | getlogbook__logbook | src/logbook/more.py | {
"start": 5495,
"end": 8411
} | class ____(Handler, StringFormatterHandlerMixin):
"""A handler that logs to twitter. Requires that you sign up an
application on twitter and request xauth support. Furthermore the
oauth2 library has to be installed.
.. deprecated:: 1.9
"""
default_format_string = TWITTER_FORMAT_STRING
formatter_class = TwitterFormatter
def __init__(
self,
consumer_key,
consumer_secret,
username,
password,
level=NOTSET,
format_string=None,
filter=None,
bubble=False,
):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.username = username
self.password = password
try:
import oauth2
except ImportError:
raise RuntimeError(
"The python-oauth2 library is required for the TwitterHandler."
)
self._oauth = oauth2
self._oauth_token = None
self._oauth_token_secret = None
self._consumer = oauth2.Consumer(consumer_key, consumer_secret)
self._client = oauth2.Client(self._consumer)
def get_oauth_token(self):
"""Returns the oauth access token."""
if self._oauth_token is None:
resp, content = self._client.request(
TWITTER_ACCESS_TOKEN_URL + "?",
"POST",
body=urlencode(
{
"x_auth_username": self.username.encode("utf-8"),
"x_auth_password": self.password.encode("utf-8"),
"x_auth_mode": "client_auth",
}
),
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
if resp["status"] != "200":
raise RuntimeError("unable to login to Twitter")
data = dict(parse_qsl(content))
self._oauth_token = data["oauth_token"]
self._oauth_token_secret = data["oauth_token_secret"]
return self._oauth.Token(self._oauth_token, self._oauth_token_secret)
def make_client(self):
"""Creates a new oauth client auth a new access token."""
return self._oauth.Client(self._consumer, self.get_oauth_token())
def tweet(self, status):
"""Tweets a given status. Status must not exceed 140 chars."""
client = self.make_client()
resp, content = client.request( # noqa: RUF059
NEW_TWEET_URL,
"POST",
body=urlencode({"status": status.encode("utf-8")}),
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
return resp["status"] == "200"
def emit(self, record):
self.tweet(self.format(record))
| TwitterHandler |
python | pytorch__pytorch | test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py | {
"start": 480,
"end": 4427
} | class ____(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_deep_copy(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
copied_st = copy.deepcopy(st)
self.assertTrue(type(copied_st) is type(st))
self.assertEqual(copied_st.local_tensor(), st.local_tensor())
self.assertFalse(copied_st is st)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_inplace_copy(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
ones_st = sharded_tensor.ones(spec, (12, 5))
self.assertFalse(torch.equal(ones_st, st))
st.copy_(ones_st)
self.assertTrue(torch.equal(st, ones_st))
# no grad inplace_copy should work between two with different requires_grad
st_with_grad = sharded_tensor.rand(spec, (12, 5), requires_grad=True)
self.assertTrue(st_with_grad.requires_grad)
self.assertFalse(ones_st.requires_grad)
with torch.no_grad():
st_with_grad.copy_(ones_st)
self.assertEqual(st_with_grad.local_tensor(), ones_st.local_tensor())
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_clone(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
copied_st = st.clone()
self.assertTrue(type(copied_st) is type(st))
self.assertEqual(copied_st.local_tensor(), st.local_tensor())
self.assertFalse(copied_st is st)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_detach(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5), requires_grad=True)
local_shards = st.local_shards()
# before set requires_grad, all local shards should not require grads
for local_shard in local_shards:
self.assertTrue(local_shard.tensor.requires_grad)
detached_st = st.detach()
self.assertFalse(detached_st.requires_grad)
for local_shard in detached_st.local_shards():
self.assertFalse(local_shard.tensor.requires_grad)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_set_requires_grad(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
local_shards = st.local_shards()
# before set requires_grad, all local shards should not require grads
for local_shard in local_shards:
self.assertFalse(local_shard.tensor.requires_grad)
st.requires_grad_()
self.assertTrue(st.requires_grad)
for local_shard in local_shards:
self.assertTrue(local_shard.tensor.requires_grad)
if __name__ == "__main__":
run_tests()
| TestTensorOps |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/urlfetch/snippets/main.py | {
"start": 965,
"end": 1438
} | class ____(webapp2.RequestHandler):
"""Demonstrates an HTTP query using urllib2."""
def get(self):
# [START gae_urlfetch_snippets_urllib2_get]
url = "http://www.google.com/humans.txt"
try:
result = urllib2.urlopen(url)
self.response.write(result.read())
except urllib2.URLError:
logging.exception("Caught exception fetching url")
# [END gae_urlfetch_snippets_urllib2_get]
| UrlLibFetchHandler |
python | tensorflow__tensorflow | tensorflow/python/training/warm_starting_util_test.py | {
"start": 1554,
"end": 57893
} | class ____(test.TestCase):
def _write_vocab(self, string_values, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, "w") as f:
f.write("\n".join(string_values))
return vocab_file
def _write_checkpoint(self, sess):
self.evaluate(variables.global_variables_initializer())
saver = saver_lib.Saver()
ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
saver.save(sess, ckpt_prefix, global_step=0)
def _create_prev_run_var(self,
var_name,
shape=None,
initializer=None,
partitioner=None):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
var = variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer,
partitioner=partitioner)
self._write_checkpoint(sess)
if partitioner:
self.assertTrue(isinstance(var, variables.PartitionedVariable))
var = var._get_variable_list()
return var, self.evaluate(var)
def _create_prev_run_vars(self,
var_names,
shapes,
initializers):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
all_vars = []
for var_name, shape, initializer in zip(var_names, shapes,
initializers):
all_vars.append(variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer))
self._write_checkpoint(sess)
return [self.evaluate(var) for var in all_vars]
def _create_dummy_inputs(self):
return {
"sc_int": array_ops.sparse_placeholder(dtypes.int32),
"sc_hash": array_ops.sparse_placeholder(dtypes.string),
"sc_keys": array_ops.sparse_placeholder(dtypes.string),
"sc_vocab": array_ops.sparse_placeholder(dtypes.string),
"real": array_ops.placeholder(dtypes.float32)
}
def _create_linear_model(self, feature_cols, partitioner):
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=feature_cols,
units=1,
cols_to_vars=cols_to_vars)
# Return a dictionary mapping each column to its variable.
return cols_to_vars
def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess):
for col, expected_values in cols_to_expected_values.items():
for i, var in enumerate(cols_to_vars[col]):
self.assertAllClose(expected_values[i], var.eval(sess))
def testWarmStartVar(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarPrevVarPartitioned(self):
_, weights = self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarCurrentVarPartitioned(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarBothVarsPartitioned(self):
_, weights = self._create_prev_run_var(
"old_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"new_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(
fruit_weights, prev_tensor_name="old_scope/fruit_weights")
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarWithVocab(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocab(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
previous_vocab_size=2)
self.evaluate(variables.global_variables_initializer())
# Old vocabulary limited to ['apple', 'banana'].
self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
current_oov_buckets=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStartVarWithVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and two new elements.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 6,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStart_ListOfVariables(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
def testWarmStart_ListOfStrings(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=["v1"])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
def testWarmStart_TwoVarsFromTheSamePrevVar(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g):
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
var2 = variable_scope.get_variable(
"v2",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(),
vars_to_warm_start=["v1", "v2"],
var_name_to_prev_var_name=dict(v2="v1"))
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
self.assertAllEqual(var2, prev_int_val)
def testWarmStart_ListOfRegexes(self):
# Save checkpoint from which to warm-start.
[prev_v1_val, prev_v1_momentum_val,
prev_v2_val, _] = self._create_prev_run_vars(
var_names=["v1", "v1/Momentum", "v2", "v2/Momentum"],
shapes=[[10, 1]] * 4,
initializers=[ones()] * 4)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
v1 = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
v1_momentum = variable_scope.get_variable(
"v1/Momentum",
shape=[10, 1],
initializer=zeros())
v2 = variable_scope.get_variable(
"v2",
shape=[10, 1],
initializer=zeros())
v2_momentum = variable_scope.get_variable(
"v2/Momentum",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(),
# This warm-starts both v1 and v1/Momentum, but only
# v2 (and not v2/Momentum).
vars_to_warm_start=["v1", "v2[^/]"])
self.evaluate(variables.global_variables_initializer())
# Verify the selection of weights were correctly warm-started (init
# overridden to ones).
self.assertAllEqual(v1, prev_v1_val)
self.assertAllEqual(v1_momentum, prev_v1_momentum_val)
self.assertAllEqual(v2, prev_v2_val)
self.assertAllEqual(v2_momentum, np.zeros([10, 1]))
def testWarmStart_SparseColumnIntegerized(self):
# Create feature column.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=".*sc_int.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess)
def testWarmStart_SparseColumnHashed(self):
# Create feature column.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
# Save checkpoint from which to warm-start.
_, prev_hash_val = self._create_prev_run_var(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_hash.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]},
sess)
  def testWarmStart_SparseColumnVocabulary(self):
    """Warm-starts a vocabulary-file column when old and new vocabs match."""
    # Create vocab for sparse column "sc_vocab".
    vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                   "vocab")
    # Create feature column.
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
    # Save checkpoint from which to warm-start.
    _, prev_vocab_val = self._create_prev_run_var(
        "linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
    # Identity partitioner: one slice per dimension (no real partitioning).
    partitioner = lambda shape, dtype: [1] * len(shape)
    # New graph, new session WITHOUT warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        self.evaluate(variables.global_variables_initializer())
        # Without warm-starting, the weights should be initialized using default
        # initializer (which is init_ops.zeros_initializer).
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
                                  sess)
    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        # Since old vocab is not explicitly set in WarmStartSettings, the old
        # vocab is assumed to be same as new vocab.
        ws_util.warm_start(
            self.get_temp_dir(), vars_to_warm_start=".*sc_vocab.*")
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
                                  sess)
  def testWarmStart_ExplicitCheckpointFile(self):
    """Warm-starts from an explicit checkpoint file prefix, not a directory."""
    # Create vocab for sparse column "sc_vocab".
    vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                   "vocab")
    # Create feature column.
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
    # Save checkpoint from which to warm-start.
    _, prev_vocab_val = self._create_prev_run_var(
        "linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
    # Identity partitioner: one slice per dimension (no real partitioning).
    partitioner = lambda shape, dtype: [1] * len(shape)
    # New graph, new session WITHOUT warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        self.evaluate(variables.global_variables_initializer())
        # Without warm-starting, the weights should be initialized using default
        # initializer (which is init_ops.zeros_initializer).
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
                                  sess)
    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        # Since old vocab is not explicitly set in WarmStartSettings, the old
        # vocab is assumed to be same as new vocab.
        ws_util.warm_start(
            # Explicitly provide the file prefix instead of just the dir.
            os.path.join(self.get_temp_dir(), "model-0"),
            vars_to_warm_start=".*sc_vocab.*")
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
                                  sess)
  def testWarmStart_SparseColumnVocabularyConstrainedVocabSizes(self):
    """Warm-starts with old/new vocab sizes smaller than the full vocabs."""
    # Create old vocabulary, and use a size smaller than the total number of
    # entries.
    old_vocab_path = self._write_vocab(["apple", "guava", "banana"],
                                       "old_vocab")
    old_vocab_size = 2  # ['apple', 'guava']

    # Create new vocab for sparse column "sc_vocab".
    current_vocab_path = self._write_vocab(
        ["apple", "banana", "guava", "orange"], "current_vocab")
    # Create feature column.  Only use 2 of the actual entries, resulting in
    # ['apple', 'banana'] for the new vocabulary.
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)

    # Save checkpoint from which to warm-start.
    self._create_prev_run_var(
        "linear_model/sc_vocab/weights", shape=[2, 1], initializer=ones())

    # Identity partitioner: one slice per dimension (no real partitioning).
    partitioner = lambda shape, dtype: [1] * len(shape)
    # New graph, new session WITHOUT warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        self.evaluate(variables.global_variables_initializer())
        # Without warm-starting, the weights should be initialized using default
        # initializer (which is init_ops.zeros_initializer).
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},
                                  sess)

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        # Explicit VocabInfo: maps new-vocab rows back to old-vocab rows,
        # respecting both constrained vocab sizes.
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=old_vocab_path,
            old_vocab_size=old_vocab_size)
        ws_util.warm_start(
            ckpt_to_initialize_from=self.get_temp_dir(),
            vars_to_warm_start=".*sc_vocab.*",
            var_name_to_vocab_info={
                "linear_model/sc_vocab/weights": vocab_info
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  'banana' isn't in the
        # first two entries of the old vocabulary, so it's newly initialized.
        self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)
def testWarmStart_BucketizedColumn(self):
# Create feature column.
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
# Save checkpoint from which to warm-start.
_, prev_bucket_val = self._create_prev_run_var(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [np.zeros([5, 1])]}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*real_bucketized.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [prev_bucket_val]}, sess)
  def testWarmStart_MultipleCols(self):
    """Warm-starts every weight (and the bias) of a many-column model."""
    # Create vocab for sparse column "sc_vocab".
    vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                   "vocab")

    # Create feature columns.
    sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
    sc_hash = fc.categorical_column_with_hash_bucket(
        "sc_hash", hash_bucket_size=15)
    sc_keys = fc.categorical_column_with_vocabulary_list(
        "sc_keys", vocabulary_list=["a", "b", "c", "e"])
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
    real = fc.numeric_column("real")
    real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
    cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20)
    all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross]

    # Save checkpoint from which to warm-start.  Also create a bias variable,
    # so we can check that it's also warm-started.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        sc_int_weights = variable_scope.get_variable(
            "linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
        sc_hash_weights = variable_scope.get_variable(
            "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
        sc_keys_weights = variable_scope.get_variable(
            "linear_model/sc_keys/weights", shape=[4, 1], initializer=rand())
        sc_vocab_weights = variable_scope.get_variable(
            "linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
        real_bucket_weights = variable_scope.get_variable(
            "linear_model/real_bucketized/weights",
            shape=[5, 1],
            initializer=norms())
        cross_weights = variable_scope.get_variable(
            "linear_model/sc_keys_X_sc_vocab/weights",
            shape=[20, 1],
            initializer=rand())
        bias = variable_scope.get_variable(
            "linear_model/bias_weights",
            shape=[1],
            initializer=rand())
        self._write_checkpoint(sess)
        # Capture the checkpointed values to compare against after
        # warm-starting below.
        (prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val,
         prev_bucket_val, prev_cross_val, prev_bias_val) = sess.run([
             sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights,
             real_bucket_weights, cross_weights, bias
         ])

    # Identity partitioner: one slice per dimension (no real partitioning).
    partitioner = lambda shape, dtype: [1] * len(shape)
    # New graph, new session WITHOUT warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
        self.evaluate(variables.global_variables_initializer())
        # Without warm-starting, all weights should be initialized using default
        # initializer (which is init_ops.zeros_initializer).
        self._assert_cols_to_vars(cols_to_vars, {
            sc_int: [np.zeros([10, 1])],
            sc_hash: [np.zeros([15, 1])],
            sc_keys: [np.zeros([4, 1])],
            sc_vocab: [np.zeros([4, 1])],
            real_bucket: [np.zeros([5, 1])],
            cross: [np.zeros([20, 1])],
        }, sess)

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=vocab_path)
        # Default vars_to_warm_start (".*") warm-starts everything, with the
        # vocab column additionally going through vocab remapping.
        ws_util.warm_start(
            self.get_temp_dir(),
            var_name_to_vocab_info={
                "linear_model/sc_vocab/weights": vocab_info
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.
        self._assert_cols_to_vars(cols_to_vars, {
            sc_int: [prev_int_val],
            sc_hash: [prev_hash_val],
            sc_keys: [prev_keys_val],
            sc_vocab: [prev_vocab_val],
            real_bucket: [prev_bucket_val],
            cross: [prev_cross_val],
            "bias": [prev_bias_val],
        }, sess)
  def testWarmStartMoreSettings(self):
    """Warm-starts with vocab remapping + prev-var renaming (partitioned)."""
    # Create old and new vocabs for sparse column "sc_vocab".
    prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                        "old_vocab")
    new_vocab_path = self._write_vocab(
        ["orange", "guava", "banana", "apple", "raspberry",
         "blueberry"], "new_vocab")
    # Create feature columns.
    sc_hash = fc.categorical_column_with_hash_bucket(
        "sc_hash", hash_bucket_size=15)
    sc_keys = fc.categorical_column_with_vocabulary_list(
        "sc_keys", vocabulary_list=["a", "b", "c", "e"])
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
    all_linear_cols = [sc_hash, sc_keys, sc_vocab]

    # Save checkpoint from which to warm-start.  Note that sc_keys' weights
    # are saved under a different name ("some_other_name") to exercise
    # var_name_to_prev_var_name below.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        variable_scope.get_variable(
            "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
        sc_keys_weights = variable_scope.get_variable(
            "some_other_name", shape=[4, 1], initializer=rand())
        variable_scope.get_variable(
            "linear_model/sc_vocab/weights",
            initializer=[[0.5], [1.], [2.], [3.]])
        self._write_checkpoint(sess)
        prev_keys_val = self.evaluate(sc_keys_weights)

    def _partitioner(shape, dtype):  # pylint:disable=unused-argument
      # Partition each var into 2 equal slices.
      partitions = [1] * len(shape)
      partitions[0] = min(2, shape.dims[0].value)
      return partitions

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=prev_vocab_path)
        ws_util.warm_start(
            self.get_temp_dir(),
            vars_to_warm_start=".*(sc_keys|sc_vocab).*",
            var_name_to_vocab_info={
                ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
            },
            var_name_to_prev_var_name={
                ws_util._infer_var_name(cols_to_vars[sc_keys]):
                    "some_other_name"
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  Var corresponding to
        # sc_hash should not be warm-started.  Var corresponding to sc_vocab
        # should be correctly warm-started after vocab remapping.
        self._assert_cols_to_vars(cols_to_vars, {
            sc_keys:
                np.split(prev_keys_val, 2),
            sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
            sc_vocab: [
                np.array([[3.], [2.], [1.]]),
                np.array([[0.5], [0.], [0.]])
            ]
        }, sess)
  def testWarmStartMoreSettingsNoPartitioning(self):
    """Same as testWarmStartMoreSettings, but with unpartitioned variables."""
    # Create old and new vocabs for sparse column "sc_vocab".
    prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                        "old_vocab")
    new_vocab_path = self._write_vocab(
        ["orange", "guava", "banana", "apple", "raspberry",
         "blueberry"], "new_vocab")
    # Create feature columns.
    sc_hash = fc.categorical_column_with_hash_bucket(
        "sc_hash", hash_bucket_size=15)
    sc_keys = fc.categorical_column_with_vocabulary_list(
        "sc_keys", vocabulary_list=["a", "b", "c", "e"])
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
    all_linear_cols = [sc_hash, sc_keys, sc_vocab]

    # Save checkpoint from which to warm-start.  sc_keys' weights are saved
    # under "some_other_name" to exercise var_name_to_prev_var_name.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        variable_scope.get_variable(
            "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
        sc_keys_weights = variable_scope.get_variable(
            "some_other_name", shape=[4, 1], initializer=rand())
        variable_scope.get_variable(
            "linear_model/sc_vocab/weights",
            initializer=[[0.5], [1.], [2.], [3.]])
        self._write_checkpoint(sess)
        prev_keys_val = self.evaluate(sc_keys_weights)

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model(all_linear_cols,
                                                 partitioner=None)
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=prev_vocab_path)
        ws_util.warm_start(
            self.get_temp_dir(),
            vars_to_warm_start=".*(sc_keys|sc_vocab).*",
            var_name_to_vocab_info={
                ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
            },
            var_name_to_prev_var_name={
                ws_util._infer_var_name(cols_to_vars[sc_keys]):
                    "some_other_name"
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  Var corresponding to
        # sc_hash should not be warm-started.  Var corresponding to sc_vocab
        # should be correctly warm-started after vocab remapping.
        self._assert_cols_to_vars(cols_to_vars, {
            sc_keys: [prev_keys_val],
            sc_hash: [np.zeros([15, 1])],
            sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]
        }, sess)
  def testWarmStartVarsToWarmstartIsNone(self):
    """vars_to_warm_start=None warm-starts only vars with a VocabInfo."""
    # Create old and new vocabs for sparse column "sc_vocab".
    prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                        "old_vocab")
    new_vocab_path = self._write_vocab(
        ["orange", "guava", "banana", "apple", "raspberry",
         "blueberry"], "new_vocab")
    # Create feature columns.
    sc_hash = fc.categorical_column_with_hash_bucket(
        "sc_hash", hash_bucket_size=15)
    sc_keys = fc.categorical_column_with_vocabulary_list(
        "sc_keys", vocabulary_list=["a", "b", "c", "e"])
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
    all_linear_cols = [sc_hash, sc_keys, sc_vocab]

    # Save checkpoint from which to warm-start.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        variable_scope.get_variable(
            "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
        variable_scope.get_variable(
            "some_other_name", shape=[4, 1], initializer=rand())
        variable_scope.get_variable(
            "linear_model/sc_vocab/weights",
            initializer=[[0.5], [1.], [2.], [3.]])
        self._write_checkpoint(sess)

    def _partitioner(shape, dtype):  # pylint:disable=unused-argument
      # Partition each var into 2 equal slices.
      partitions = [1] * len(shape)
      partitions[0] = min(2, shape.dims[0].value)
      return partitions

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=prev_vocab_path)
        ws_util.warm_start(
            self.get_temp_dir(),
            # The special value of None here will ensure that only the variable
            # specified in var_name_to_vocab_info (sc_vocab embedding) is
            # warm-started.
            vars_to_warm_start=None,
            var_name_to_vocab_info={
                ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
            },
            # Even though this is provided, the None value for
            # vars_to_warm_start overrides the logic, and this will not be
            # warm-started.
            var_name_to_prev_var_name={
                ws_util._infer_var_name(cols_to_vars[sc_keys]):
                    "some_other_name"
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  Var corresponding to
        # sc_vocab should be correctly warm-started after vocab remapping,
        # and neither of the other two should be warm-started.
        self._assert_cols_to_vars(cols_to_vars, {
            sc_keys: [np.zeros([2, 1]), np.zeros([2, 1])],
            sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
            sc_vocab: [
                np.array([[3.], [2.], [1.]]),
                np.array([[0.5], [0.], [0.]])
            ]
        }, sess)
  def testWarmStartEmbeddingColumn(self):
    """Warm-starts an embedding column's weights with vocab remapping."""
    # Create old and new vocabs for embedding column "sc_vocab".
    prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                        "old_vocab")
    new_vocab_path = self._write_vocab(
        ["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
        "new_vocab")

    # Save checkpoint from which to warm-start.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        variable_scope.get_variable(
            "input_layer/sc_vocab_embedding/embedding_weights",
            initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
        self._write_checkpoint(sess)

    def _partitioner(shape, dtype):  # pylint:disable=unused-argument
      # Partition each var into 2 equal slices.
      partitions = [1] * len(shape)
      partitions[0] = min(2, shape.dims[0].value)
      return partitions

    # Create feature columns.
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
    emb_vocab_column = fc.embedding_column(
        categorical_column=sc_vocab,
        dimension=2)
    all_deep_cols = [emb_vocab_column]
    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = {}
        with variable_scope.variable_scope("", partitioner=_partitioner):
          # Create the variables.
          fc.input_layer(
              features=self._create_dummy_inputs(),
              feature_columns=all_deep_cols,
              cols_to_vars=cols_to_vars)
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=prev_vocab_path,
            # Can't use constant_initializer with load_and_remap.  In practice,
            # use a truncated normal initializer.
            backup_initializer=init_ops.random_uniform_initializer(
                minval=0.42, maxval=0.42))
        ws_util.warm_start(
            self.get_temp_dir(),
            var_name_to_vocab_info={
                ws_util._infer_var_name(cols_to_vars[emb_vocab_column]):
                    vocab_info
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  Var corresponding to
        # emb_vocab_column should be correctly warm-started after vocab
        # remapping.  Missing values are filled in with the EmbeddingColumn's
        # initializer.
        self._assert_cols_to_vars(
            cols_to_vars, {
                emb_vocab_column: [
                    np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
                    np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
                ]
            }, sess)
  def testWarmStartEmbeddingColumnLinearModel(self):
    """Warm-starts embedding + linear weights of an embedding column."""
    # Create old and new vocabs for embedding column "sc_vocab".
    prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                        "old_vocab")
    new_vocab_path = self._write_vocab(
        ["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
        "new_vocab")

    # Save checkpoint from which to warm-start.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        variable_scope.get_variable(
            "linear_model/sc_vocab_embedding/embedding_weights",
            initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
        variable_scope.get_variable(
            "linear_model/sc_vocab_embedding/weights",
            initializer=[[0.69], [0.71]])
        self._write_checkpoint(sess)

    def _partitioner(shape, dtype):  # pylint:disable=unused-argument
      # Partition each var into 2 equal slices.
      partitions = [1] * len(shape)
      partitions[0] = min(2, shape.dims[0].value)
      return partitions

    # Create feature columns.
    sc_vocab = fc.categorical_column_with_vocabulary_file(
        "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
    emb_vocab = fc.embedding_column(
        categorical_column=sc_vocab,
        dimension=2)
    all_deep_cols = [emb_vocab]
    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cols_to_vars = {}
        with variable_scope.variable_scope("", partitioner=_partitioner):
          # Create the variables.
          fc.linear_model(
              features=self._create_dummy_inputs(),
              feature_columns=all_deep_cols,
              cols_to_vars=cols_to_vars)

        # Construct the vocab_info for the embedding weight.
        vocab_info = ws_util.VocabInfo(
            new_vocab=sc_vocab.vocabulary_file,
            new_vocab_size=sc_vocab.vocabulary_size,
            num_oov_buckets=sc_vocab.num_oov_buckets,
            old_vocab=prev_vocab_path,
            # Can't use constant_initializer with load_and_remap.  In practice,
            # use a truncated normal initializer.
            backup_initializer=init_ops.random_uniform_initializer(
                minval=0.42, maxval=0.42))
        ws_util.warm_start(
            self.get_temp_dir(),
            vars_to_warm_start=".*sc_vocab.*",
            var_name_to_vocab_info={
                "linear_model/sc_vocab_embedding/embedding_weights": vocab_info
            })
        self.evaluate(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.  Var corresponding to
        # emb_vocab should be correctly warm-started after vocab remapping.
        # Missing values are filled in with the EmbeddingColumn's initializer.
        self._assert_cols_to_vars(
            cols_to_vars,
            {
                emb_vocab: [
                    # linear weights part 0.
                    np.array([[0.69]]),
                    # linear weights part 1.
                    np.array([[0.71]]),
                    # embedding_weights part 0.
                    np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
                    # embedding_weights part 1.
                    np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
                ]
            },
            sess)
  def testErrorConditions(self):
    """Invalid inputs to the warm-start APIs raise TypeError/ValueError."""
    x = variable_scope.get_variable(
        "x",
        shape=[4, 1],
        initializer=ones(),
        partitioner=lambda shape, dtype: [2, 1])

    # List of PartitionedVariable is invalid type when warm-starting with vocab.
    self.assertRaises(TypeError, ws_util._warm_start_var_with_vocab, [x],
                      "/tmp", 5, "/tmp", "/tmp")

    # Unused variable names raises ValueError.
    with ops.Graph().as_default():
      with self.cached_session() as sess:
        x = variable_scope.get_variable(
            "x",
            shape=[4, 1],
            initializer=ones(),
            partitioner=lambda shape, dtype: [2, 1])
        self._write_checkpoint(sess)
        # "y" does not exist in the checkpoint or the graph, so both mappings
        # below must be rejected.
        self.assertRaises(
            ValueError,
            ws_util.warm_start,
            self.get_temp_dir(),
            var_name_to_vocab_info={"y": ws_util.VocabInfo("", 1, 0, "")})
        self.assertRaises(
            ValueError,
            ws_util.warm_start,
            self.get_temp_dir(),
            var_name_to_prev_var_name={"y": "y2"})
def testWarmStartFromObjectBasedCheckpoint(self):
prev_val = [[0.5], [1.], [1.5], [2.]]
with ops.Graph().as_default() as g:
with self.session(graph=g):
prev_var = variable_scope.get_variable(
"fruit_weights",
initializer=prev_val)
self.evaluate(variables.global_variables_initializer())
# Save object-based checkpoint.
tracking_util.Checkpoint(v=prev_var).save(
os.path.join(self.get_temp_dir(), "checkpoint"))
with ops.Graph().as_default() as g:
with self.session(graph=g):
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util.warm_start(self.get_temp_dir())
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, self.evaluate(fruit_weights))
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  test.main()
| WarmStartingUtilTest |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/text.py | {
"start": 2976,
"end": 47561
} | class ____(JupyterMixin):
"""Text with color / style.
Args:
text (str, optional): Default unstyled text. Defaults to "".
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
spans (List[Span], optional). A list of predefined style spans. Defaults to None.
"""
__slots__ = [
"_text",
"style",
"justify",
"overflow",
"no_wrap",
"end",
"tab_size",
"_spans",
"_length",
]
def __init__(
self,
text: str = "",
style: Union[str, Style] = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = None,
spans: Optional[List[Span]] = None,
) -> None:
sanitized_text = strip_control_codes(text)
self._text = [sanitized_text]
self.style = style
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.end = end
self.tab_size = tab_size
self._spans: List[Span] = spans or []
self._length: int = len(sanitized_text)
    def __len__(self) -> int:
        """Length of the plain text (cached character count)."""
        return self._length
    def __bool__(self) -> bool:
        """A Text is truthy when it contains at least one character."""
        return bool(self._length)
    def __str__(self) -> str:
        """Return the unstyled text."""
        return self.plain
    def __repr__(self) -> str:
        """Debug representation including plain text, spans, and base style."""
        return f"<text {self.plain!r} {self._spans!r} {self.style!r}>"
def __add__(self, other: Any) -> "Text":
if isinstance(other, (str, Text)):
result = self.copy()
result.append(other)
return result
return NotImplemented
def __eq__(self, other: object) -> bool:
if not isinstance(other, Text):
return NotImplemented
return self.plain == other.plain and self._spans == other._spans
def __contains__(self, other: object) -> bool:
if isinstance(other, str):
return other in self.plain
elif isinstance(other, Text):
return other.plain in self.plain
return False
    def __getitem__(self, slice: Union[int, slice]) -> "Text":
        """Return a new Text for a single character or a step-1 slice.

        NOTE: the parameter deliberately shadows the ``slice`` builtin to
        match the original signature.
        """

        def get_text_at(offset: int) -> "Text":
            # Build a one-character Text carrying every span that covers
            # the character at ``offset``.
            _Span = Span
            text = Text(
                self.plain[offset],
                spans=[
                    _Span(0, 1, style)
                    for start, end, style in self._spans
                    if end > offset >= start
                ],
                end="",
            )
            return text

        if isinstance(slice, int):
            return get_text_at(slice)
        else:
            start, stop, step = slice.indices(len(self.plain))
            if step == 1:
                # divide() returns [before, selected, after]; keep the middle.
                lines = self.divide([start, stop])
                return lines[1]
            else:
                # This would be a bit of work to implement efficiently
                # For now, its not required
                raise TypeError("slices with step!=1 are not supported")
    @property
    def cell_len(self) -> int:
        """Get the number of cells required to render this text."""
        # Cell width differs from len() for wide (e.g. CJK) characters.
        return cell_len(self.plain)
    @property
    def markup(self) -> str:
        """Get console markup to render this Text.

        Returns:
            str: A string potentially creating markup tags.
        """
        from .markup import escape

        output: List[str] = []

        plain = self.plain
        # One entry per tag boundary: (offset, is_closing_tag, style).
        # The base style opens at 0 and closes at the end of the text.
        markup_spans = [
            (0, False, self.style),
            *((span.start, False, span.style) for span in self._spans),
            *((span.end, True, span.style) for span in self._spans),
            (len(plain), True, self.style),
        ]
        # Sort by offset, with closing tags (True) after opening tags at the
        # same offset so tags nest correctly.
        markup_spans.sort(key=itemgetter(0, 1))
        position = 0
        append = output.append
        for offset, closing, style in markup_spans:
            if offset > position:
                # Emit escaped literal text between the previous boundary
                # and this one.
                append(escape(plain[position:offset]))
                position = offset
            if style:
                append(f"[/{style}]" if closing else f"[{style}]")
        markup = "".join(output)
        return markup
@classmethod
def from_markup(
cls,
text: str,
*,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
end: str = "\n",
) -> "Text":
"""Create Text instance from markup.
Args:
text (str): A string containing console markup.
style (Union[str, Style], optional): Base style for text. Defaults to "".
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
Returns:
Text: A Text instance with markup rendered.
"""
from .markup import render
rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
rendered_text.justify = justify
rendered_text.overflow = overflow
rendered_text.end = end
return rendered_text
@classmethod
def from_ansi(
cls,
text: str,
*,
style: Union[str, Style] = "",
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = 8,
) -> "Text":
"""Create a Text object from a string containing ANSI escape codes.
Args:
text (str): A string containing escape codes.
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
"""
from .ansi import AnsiDecoder
joiner = Text(
"\n",
justify=justify,
overflow=overflow,
no_wrap=no_wrap,
end=end,
tab_size=tab_size,
style=style,
)
decoder = AnsiDecoder()
result = joiner.join(line for line in decoder.decode(text))
return result
@classmethod
def styled(
cls,
text: str,
style: StyleType = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
) -> "Text":
"""Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
to pad the text when it is justified.
Args:
text (str): A string containing console markup.
style (Union[str, Style]): Style to apply to the text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
Returns:
Text: A text instance with a style applied to the entire string.
"""
styled_text = cls(text, justify=justify, overflow=overflow)
styled_text.stylize(style)
return styled_text
@classmethod
def assemble(
    cls,
    *parts: Union[str, "Text", Tuple[str, StyleType]],
    style: Union[str, Style] = "",
    justify: Optional["JustifyMethod"] = None,
    overflow: Optional["OverflowMethod"] = None,
    no_wrap: Optional[bool] = None,
    end: str = "\n",
    tab_size: int = 8,
    meta: Optional[Dict[str, Any]] = None,
) -> "Text":
    """Construct a text instance by combining a sequence of strings with optional styles.

    The positional arguments should be either strings, or a tuple of string + style.

    Args:
        style (Union[str, Style], optional): Base style for text. Defaults to "".
        justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
        overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
        no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
        end (str, optional): Character to end text with. Defaults to "\\n".
        tab_size (int): Number of spaces per tab. Defaults to 8.
        meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None

    Returns:
        Text: A new text instance.
    """
    text = cls(
        style=style,
        justify=justify,
        overflow=overflow,
        no_wrap=no_wrap,
        end=end,
        tab_size=tab_size,
    )
    append = text.append
    _Text = Text
    for part in parts:
        if isinstance(part, (_Text, str)):
            append(part)
        else:
            # A (string, style) tuple: unpack into append(text, style).
            append(*part)
    if meta:
        text.apply_meta(meta)
    return text
@property
def plain(self) -> str:
    """Get the text as a single string."""
    # Internally the text is a list of fragments; lazily collapse it to one
    # string on access so repeated appends stay cheap.
    if len(self._text) != 1:
        self._text[:] = ["".join(self._text)]
    return self._text[0]
@plain.setter
def plain(self, new_text: str) -> None:
    """Set the text to a new value."""
    if new_text != self.plain:
        # Control codes would corrupt terminal output, so strip them here.
        sanitized_text = strip_control_codes(new_text)
        self._text[:] = [sanitized_text]
        old_length = self._length
        self._length = len(sanitized_text)
        # If the text shrank, existing spans may point past the end; clip them.
        if old_length > self._length:
            self._trim_spans()
@property
def spans(self) -> List[Span]:
    """Get a reference to the internal list of spans."""
    # NOTE: this is the live list, not a copy — mutations affect the Text.
    return self._spans
@spans.setter
def spans(self, spans: List[Span]) -> None:
    """Set spans."""
    # Copy the incoming list so the caller can't mutate our internal state.
    self._spans = spans[:]
def blank_copy(self, plain: str = "") -> "Text":
    """Return a new Text carrying this instance's settings, but not its string or spans."""
    return Text(
        plain,
        style=self.style,
        justify=self.justify,
        overflow=self.overflow,
        no_wrap=self.no_wrap,
        end=self.end,
        tab_size=self.tab_size,
    )
def copy(self) -> "Text":
    """Return a copy of this instance (text, settings, and spans)."""
    duplicate = Text(
        self.plain,
        style=self.style,
        justify=self.justify,
        overflow=self.overflow,
        no_wrap=self.no_wrap,
        end=self.end,
        tab_size=self.tab_size,
    )
    # Spans are immutable tuples, so a shallow copy is sufficient.
    duplicate._spans[:] = self._spans
    return duplicate
def stylize(
    self,
    style: Union[str, Style],
    start: int = 0,
    end: Optional[int] = None,
) -> None:
    """Apply a style to the text, or a portion of the text.

    Args:
        style (Union[str, Style]): Style instance or style definition to apply.
        start (int): Start offset (negative indexing is supported). Defaults to 0.
        end (Optional[int], optional): End offset (negative indexing is supported),
            or None for end of text. Defaults to None.
    """
    if not style:
        return
    length = len(self)
    # Normalise negative offsets and a missing end offset.
    if start < 0:
        start += length
    if end is None:
        end = length
    elif end < 0:
        end += length
    if start >= length or end <= start:
        # Span lies outside the text or is empty — nothing to record.
        return
    self._spans.append(Span(start, min(length, end), style))
def stylize_before(
    self,
    style: Union[str, Style],
    start: int = 0,
    end: Optional[int] = None,
) -> None:
    """Apply a style to the text, or a portion of the text. Styles will be applied
    before other styles already present.

    Args:
        style (Union[str, Style]): Style instance or style definition to apply.
        start (int): Start offset (negative indexing is supported). Defaults to 0.
        end (Optional[int], optional): End offset (negative indexing is supported),
            or None for end of text. Defaults to None.
    """
    if not style:
        return
    length = len(self)
    # Normalise negative offsets and a missing end offset.
    if start < 0:
        start += length
    if end is None:
        end = length
    elif end < 0:
        end += length
    if start >= length or end <= start:
        # Span lies outside the text or is empty — nothing to record.
        return
    # Inserting at index 0 makes later spans override this one when rendered.
    self._spans.insert(0, Span(start, min(length, end), style))
def apply_meta(
    self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
) -> None:
    """Apply metadata to the text, or a portion of the text.

    Args:
        meta (Dict[str, Any]): A dict of meta information.
        start (int): Start offset (negative indexing is supported). Defaults to 0.
        end (Optional[int], optional): End offset (negative indexing is supported),
            or None for end of text. Defaults to None.
    """
    # Meta is carried by a Style instance, so this reduces to a stylize call.
    self.stylize(Style.from_meta(meta), start=start, end=end)
def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
    """Apply event handlers (used by Textual project).

    Example:
        >>> from rich.text import Text
        >>> text = Text("hello world")
        >>> text.on(click="view.toggle('world')")

    Args:
        meta (Dict[str, Any]): Mapping of meta information.
        **handlers: Keyword args are prefixed with "@" to define handlers.

    Returns:
        Text: Self is returned so the method may be chained.
    """
    meta = {} if meta is None else meta
    # Handler keywords become "@"-prefixed meta keys understood by Textual.
    meta.update({f"@{key}": value for key, value in handlers.items()})
    self.stylize(Style.from_meta(meta))
    return self
def remove_suffix(self, suffix: str) -> None:
    """Remove a suffix if it exists.

    Args:
        suffix (str): Suffix to remove.
    """
    if not self.plain.endswith(suffix):
        return
    self.right_crop(len(suffix))
def get_style_at_offset(self, console: "Console", offset: int) -> Style:
    """Get the style of a character at a given offset.

    Args:
        console (~Console): Console where text will be rendered.
        offset (int): Offset in to text (negative indexing supported)

    Returns:
        Style: A Style instance.
    """
    # TODO: This is a little inefficient, it is only used by full justify
    if offset < 0:
        offset += len(self)
    resolve = console.get_style
    combined = resolve(self.style).copy()
    # Overlay every span covering the offset, in insertion order.
    for span_start, span_end, span_style in self._spans:
        if span_start <= offset < span_end:
            combined += resolve(span_style, default="")
    return combined
def extend_style(self, spaces: int) -> None:
    """Extend the Text a given number of spaces where the spaces have the same style
    as the last character.

    Args:
        spaces (int): Number of spaces to add to the Text.
    """
    if spaces <= 0:
        return
    padding = " " * spaces
    if not self._spans:
        # No styles to stretch; a plain string append is sufficient.
        self.plain += padding
        return
    tail_offset = len(self)
    # Any span touching the end of the text grows to cover the new spaces.
    self._spans[:] = [
        span.extend(spaces) if span.end >= tail_offset else span
        for span in self._spans
    ]
    self._text.append(padding)
    self._length += spaces
def highlight_regex(
    self,
    re_highlight: Union[Pattern[str], str],
    style: Optional[Union[GetStyleCallable, StyleType]] = None,
    *,
    style_prefix: str = "",
) -> int:
    """Highlight text with a regular expression, where group names are
    translated to styles.

    Args:
        re_highlight (Union[re.Pattern, str]): A regular expression object or string.
        style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
            which accepts the matched text and returns a style. Defaults to None.
        style_prefix (str, optional): Optional prefix to add to style group names.

    Returns:
        int: Number of regex matches
    """
    count = 0
    append_span = self._spans.append
    _Span = Span
    plain = self.plain
    if isinstance(re_highlight, str):
        re_highlight = re.compile(re_highlight)
    for match in re_highlight.finditer(plain):
        get_span = match.span
        if style:
            start, end = get_span()
            # A callable style is resolved against the matched substring.
            match_style = style(plain[start:end]) if callable(style) else style
            if match_style is not None and end > start:
                append_span(_Span(start, end, match_style))
                count += 1
        # Named groups map to styles named "<prefix><group-name>".
        for name in match.groupdict().keys():
            start, end = get_span(name)
            # start == -1 means the group did not participate in the match.
            if start != -1 and end > start:
                append_span(_Span(start, end, f"{style_prefix}{name}"))
    return count
def highlight_words(
    self,
    words: Iterable[str],
    style: Union[str, Style],
    *,
    case_sensitive: bool = True,
) -> int:
    """Highlight words with a style.

    Args:
        words (Iterable[str]): Words to highlight.
        style (Union[str, Style]): Style to apply.
        case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.

    Returns:
        int: Number of words highlighted.
    """
    # Build one alternation pattern so the text is scanned a single time.
    pattern = "|".join(re.escape(word) for word in words)
    flags = 0 if case_sensitive else re.IGNORECASE
    count = 0
    for match in re.finditer(pattern, self.plain, flags=flags):
        match_start, match_end = match.span(0)
        self._spans.append(Span(match_start, match_end, style))
        count += 1
    return count
def rstrip(self) -> None:
    """Strip whitespace from end of text."""
    # Assigning through the `plain` setter also clips any spans that now
    # extend past the shortened text.
    self.plain = self.plain.rstrip()
def rstrip_end(self, size: int) -> None:
    """Remove whitespace beyond a certain width at the end of the text.

    Args:
        size (int): The desired size of the text.
    """
    excess = len(self) - size
    if excess <= 0:
        return
    trailing = _re_whitespace.search(self.plain)
    if trailing is None:
        return
    # Crop no more than the run of trailing whitespace.
    self.right_crop(min(len(trailing.group(0)), excess))
def set_length(self, new_length: int) -> None:
    """Set new length of the text, clipping or padding as required."""
    delta = new_length - len(self)
    if delta > 0:
        self.pad_right(delta)
    elif delta < 0:
        self.right_crop(-delta)
def __rich_console__(
    self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Segment]:
    """Render protocol hook: wrap, join, and emit the text as segments."""
    # Instance settings win over console/render options; fall back to defaults.
    tab_size: int = console.tab_size if self.tab_size is None else self.tab_size
    justify = self.justify or options.justify or DEFAULT_JUSTIFY
    overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
    lines = self.wrap(
        console,
        options.max_width,
        justify=justify,
        overflow=overflow,
        tab_size=tab_size or 8,
        no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
    )
    # Re-join the wrapped lines so a single render pass emits everything.
    all_lines = Text("\n").join(lines)
    yield from all_lines.render(console, end=self.end)
def __rich_measure__(
    self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
    """Measure the minimum and maximum cell widths required to render."""
    plain = self.plain
    text_lines = plain.splitlines()
    # Maximum width: the widest complete line.
    maximum = max(cell_len(line) for line in text_lines) if text_lines else 0
    # Minimum width: the widest single word, since wrapping breaks at words.
    text_words = plain.split()
    minimum = (
        max(cell_len(word) for word in text_words) if text_words else maximum
    )
    return Measurement(minimum, maximum)
def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
    """Render the text as Segments.

    Args:
        console (Console): Console instance.
        end (Optional[str], optional): Optional end character.

    Returns:
        Iterable[Segment]: Result of render that may be written to the console.
    """
    _Segment = Segment
    text = self.plain
    # Fast path: no spans means a single unstyled segment.
    if not self._spans:
        yield Segment(text)
        if end:
            yield _Segment(end)
        return
    get_style = partial(console.get_style, default=Style.null())
    # Spans are numbered from 1; id 0 is reserved for the base style.
    enumerated_spans = list(enumerate(self._spans, 1))
    style_map = {index: get_style(span.style) for index, span in enumerated_spans}
    style_map[0] = get_style(self.style)
    # Build sweep events: each span contributes an "enter" at its start and a
    # "leave" (leaving=True) at its end; the base style brackets everything.
    spans = [
        (0, False, 0),
        *((span.start, False, index) for index, span in enumerated_spans),
        *((span.end, True, index) for index, span in enumerated_spans),
        (len(text), True, 0),
    ]
    # Sort by offset, with "leave" events after "enter" events at the same offset.
    spans.sort(key=itemgetter(0, 1))
    # Stack of currently-active style ids.
    stack: List[int] = []
    stack_append = stack.append
    stack_pop = stack.remove
    # Combining styles is comparatively expensive; memoise per style tuple.
    style_cache: Dict[Tuple[Style, ...], Style] = {}
    style_cache_get = style_cache.get
    combine = Style.combine

    def get_current_style() -> Style:
        """Construct current style from stack."""
        # Sorted so earlier (lower-id) spans are applied first.
        styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
        cached_style = style_cache_get(styles)
        if cached_style is not None:
            return cached_style
        current_style = combine(styles)
        style_cache[styles] = current_style
        return current_style

    # Sweep the events; each run between adjacent offsets becomes one segment.
    for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
        if leaving:
            stack_pop(style_id)
        else:
            stack_append(style_id)
        if next_offset > offset:
            yield _Segment(text[offset:next_offset], get_current_style())
    if end:
        yield _Segment(end)
def join(self, lines: Iterable["Text"]) -> "Text":
    """Join text together with this instance as the separator.

    Args:
        lines (Iterable[Text]): An iterable of Text instances to join.

    Returns:
        Text: A new text instance containing join text.
    """
    new_text = self.blank_copy()

    def iter_text() -> Iterable["Text"]:
        # Interleave the separator between lines, but only when it is non-empty.
        if self.plain:
            for last, line in loop_last(lines):
                yield line
                if not last:
                    yield self
        else:
            yield from lines

    extend_text = new_text._text.extend
    append_span = new_text._spans.append
    extend_spans = new_text._spans.extend
    offset = 0
    _Span = Span
    for text in iter_text():
        extend_text(text._text)
        # A piece's base style becomes a span covering that piece's range.
        if text.style:
            append_span(_Span(offset, offset + len(text), text.style))
        # Existing spans are shifted by the running offset.
        extend_spans(
            _Span(offset + start, offset + end, style)
            for start, end, style in text._spans
        )
        offset += len(text)
    new_text._length = offset
    return new_text
def expand_tabs(self, tab_size: Optional[int] = None) -> None:
    """Converts tabs to spaces.

    Args:
        tab_size (int, optional): Size of tabs. Defaults to 8.
    """
    if "\t" not in self.plain:
        return
    # Resolve tab size: argument, then instance setting, then 8.
    if tab_size is None:
        tab_size = self.tab_size
    if tab_size is None:
        tab_size = 8
    new_text: List[Text] = []
    append = new_text.append
    # Process line by line so tab stops reset at each newline.
    for line in self.split("\n", include_separator=True):
        if "\t" not in line.plain:
            append(line)
        else:
            cell_position = 0
            parts = line.split("\t", include_separator=True)
            for part in parts:
                if part.plain.endswith("\t"):
                    # Replace the tab with one space, then pad (with the same
                    # style as the tab) up to the next tab stop.
                    part._text[-1] = part._text[-1][:-1] + " "
                    cell_position += part.cell_len
                    tab_remainder = cell_position % tab_size
                    if tab_remainder:
                        spaces = tab_size - tab_remainder
                        part.extend_style(spaces)
                        cell_position += spaces
                else:
                    cell_position += part.cell_len
                append(part)
    # Rebuild this instance in place from the expanded parts.
    result = Text("").join(new_text)
    self._text = [result.plain]
    self._length = len(self.plain)
    self._spans[:] = result._spans
def truncate(
    self,
    max_width: int,
    *,
    overflow: Optional["OverflowMethod"] = None,
    pad: bool = False,
) -> None:
    """Truncate text if it is longer than a given width.

    Args:
        max_width (int): Maximum number of characters in text.
        overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis".
            Defaults to None, to use self.overflow.
        pad (bool, optional): Pad with spaces if the length is less than max_width.
            Defaults to False.
    """
    method = overflow or self.overflow or DEFAULT_OVERFLOW
    if method == "ignore":
        return
    width = cell_len(self.plain)
    if width > max_width:
        if method == "ellipsis":
            # Reserve one cell for the ellipsis character.
            self.plain = set_cell_size(self.plain, max_width - 1) + "…"
        else:
            self.plain = set_cell_size(self.plain, max_width)
    elif pad and width < max_width:
        self._text = [f"{self.plain}{' ' * (max_width - width)}"]
        self._length = len(self.plain)
def _trim_spans(self) -> None:
    """Remove or modify any spans that are over the end of the text."""
    limit = len(self.plain)
    trimmed: List[Span] = []
    for span in self._spans:
        if span.start >= limit:
            # Span lies entirely beyond the text: discard it.
            continue
        if span.end >= limit:
            # Span overlaps the end: clip it to the text length.
            span = Span(span.start, limit, span.style)
        trimmed.append(span)
    self._spans[:] = trimmed
def pad(self, count: int, character: str = " ") -> None:
    """Pad left and right with a given number of characters.

    Args:
        count (int): Width of padding.
        character (str): The character to pad with. Must be a string of length 1.
    """
    assert len(character) == 1, "Character must be a string of length 1"
    if not count:
        return
    padding = character * count
    self.plain = f"{padding}{self.plain}{padding}"
    # Existing spans shift right by the width of the left padding.
    self._spans[:] = [
        Span(start + count, end + count, style)
        for start, end, style in self._spans
    ]
def pad_left(self, count: int, character: str = " ") -> None:
    """Pad the left with a given character.

    Args:
        count (int): Number of characters to pad.
        character (str, optional): Character to pad with. Defaults to " ".
    """
    assert len(character) == 1, "Character must be a string of length 1"
    if not count:
        return
    self.plain = f"{character * count}{self.plain}"
    # Shift spans to account for the inserted prefix.
    self._spans[:] = [
        Span(start + count, end + count, style)
        for start, end, style in self._spans
    ]
def pad_right(self, count: int, character: str = " ") -> None:
    """Pad the right with a given character.

    Args:
        count (int): Number of characters to pad.
        character (str, optional): Character to pad with. Defaults to " ".
    """
    assert len(character) == 1, "Character must be a string of length 1"
    if not count:
        return
    # Appending never moves existing spans, so no span fix-up is needed.
    self.plain = f"{self.plain}{character * count}"
def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
    """Align text to a given width.

    Args:
        align (AlignMethod): One of "left", "center", or "right".
        width (int): Desired width.
        character (str, optional): Character to pad with. Defaults to " ".
    """
    self.truncate(width)
    remaining = width - cell_len(self.plain)
    if not remaining:
        return
    if align == "left":
        self.pad_right(remaining, character)
    elif align == "center":
        left_pad = remaining // 2
        self.pad_left(left_pad, character)
        self.pad_right(remaining - left_pad, character)
    else:
        # "right" alignment pads entirely on the left.
        self.pad_left(remaining, character)
def append(
    self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
) -> "Text":
    """Add text with an optional style.

    Args:
        text (Union[Text, str]): A str or Text to append.
        style (str, optional): A style name. Defaults to None.

    Returns:
        Text: Returns self for chaining.
    """
    if not isinstance(text, (str, Text)):
        raise TypeError("Only str or Text can be appended to Text")
    if len(text):
        if isinstance(text, str):
            sanitized_text = strip_control_codes(text)
            self._text.append(sanitized_text)
            # NOTE: offset is len(self) *after* appending the fragment but
            # before _length is updated, so it equals the pre-append length.
            offset = len(self)
            text_length = len(sanitized_text)
            if style:
                self._spans.append(Span(offset, offset + text_length, style))
            self._length += text_length
        elif isinstance(text, Text):
            _Span = Span
            if style is not None:
                # A Text instance carries its own styles; a second style would
                # be ambiguous, so it is rejected.
                raise ValueError(
                    "style must not be set when appending Text instance"
                )
            # Capture the current length before appending; spans from the
            # incoming Text are shifted by this offset.
            text_length = self._length
            if text.style:
                self._spans.append(
                    _Span(text_length, text_length + len(text), text.style)
                )
            self._text.append(text.plain)
            self._spans.extend(
                _Span(start + text_length, end + text_length, style)
                for start, end, style in text._spans.copy()
            )
            self._length += len(text)
    return self
def append_text(self, text: "Text") -> "Text":
    """Append another Text instance. This method is more performant that Text.append, but
    only works for Text.

    Args:
        text (Text): The Text instance to append to this instance.

    Returns:
        Text: Returns self for chaining.
    """
    _Span = Span
    # Capture the current length first; all incoming spans shift by it.
    text_length = self._length
    if text.style:
        # The incoming Text's base style becomes a span over its range.
        self._spans.append(_Span(text_length, text_length + len(text), text.style))
    self._text.append(text.plain)
    self._spans.extend(
        _Span(start + text_length, end + text_length, style)
        for start, end, style in text._spans.copy()
    )
    self._length += len(text)
    return self
def append_tokens(
    self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
) -> "Text":
    """Append iterable of str and style. Style may be a Style instance or a
    str style definition.

    Args:
        tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples
            containing str content and style.

    Returns:
        Text: Returns self for chaining.
    """
    position = len(self)
    for raw_content, token_style in tokens:
        content = strip_control_codes(raw_content)
        self._text.append(content)
        if token_style:
            self._spans.append(
                Span(position, position + len(content), token_style)
            )
        position += len(content)
    self._length = position
    return self
def copy_styles(self, text: "Text") -> None:
    """Copy styles from another Text instance.

    Args:
        text (Text): A Text instance to copy styles from, must be the same length.
    """
    # Spans are immutable tuples, so extending with references is a safe copy.
    self._spans.extend(text._spans)
def split(
    self,
    separator: str = "\n",
    *,
    include_separator: bool = False,
    allow_blank: bool = False,
) -> Lines:
    """Split rich text in to lines, preserving styles.

    Args:
        separator (str, optional): String to split on. Defaults to "\\n".
        include_separator (bool, optional): Include the separator in the lines. Defaults to False.
        allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.

    Returns:
        List[RichText]: A list of rich text, one per line of the original.
    """
    assert separator, "separator must not be empty"

    text = self.plain
    if separator not in text:
        # No separator: the whole text is a single line.
        return Lines([self.copy()])
    if include_separator:
        # Cut just after each separator so it stays attached to its line.
        lines = self.divide(
            match.end() for match in re.finditer(re.escape(separator), text)
        )
    else:

        def flatten_spans() -> Iterable[int]:
            # Cut on both sides of each separator so it forms its own piece,
            # which is then filtered out below.
            for match in re.finditer(re.escape(separator), text):
                start, end = match.span()
                yield start
                yield end

        lines = Lines(
            line for line in self.divide(flatten_spans()) if line.plain != separator
        )
    if not allow_blank and text.endswith(separator):
        # Drop the trailing empty line produced by a trailing separator.
        lines.pop()
    return lines
def divide(self, offsets: Iterable[int]) -> Lines:
    """Divide text in to a number of lines at given offsets.

    Args:
        offsets (Iterable[int]): Offsets used to divide text.

    Returns:
        Lines: New RichText instances between offsets.
    """
    _offsets = list(offsets)

    if not _offsets:
        return Lines([self.copy()])

    text = self.plain
    text_length = len(text)
    # Bracket the offsets with 0 and the text length, then pair adjacent
    # values into (start, end) ranges — one range per output line.
    divide_offsets = [0, *_offsets, text_length]
    line_ranges = list(zip(divide_offsets, divide_offsets[1:]))

    style = self.style
    justify = self.justify
    overflow = self.overflow
    _Text = Text
    new_lines = Lines(
        _Text(
            text[start:end],
            style=style,
            justify=justify,
            overflow=overflow,
        )
        for start, end in line_ranges
    )
    if not self._spans:
        return new_lines

    _line_appends = [line._spans.append for line in new_lines._lines]
    line_count = len(line_ranges)
    _Span = Span

    for span_start, span_end, style in self._spans:
        # Binary-search for the first line containing the span's start.
        lower_bound = 0
        upper_bound = line_count
        start_line_no = (lower_bound + upper_bound) // 2

        while True:
            line_start, line_end = line_ranges[start_line_no]
            if span_start < line_start:
                upper_bound = start_line_no - 1
            elif span_start > line_end:
                lower_bound = start_line_no + 1
            else:
                break
            start_line_no = (lower_bound + upper_bound) // 2

        if span_end < line_end:
            # The span is contained in a single line.
            end_line_no = start_line_no
        else:
            # Binary-search for the last line the span reaches; the start
            # line is a valid lower bound since spans run forward.
            end_line_no = lower_bound = start_line_no
            upper_bound = line_count

            while True:
                line_start, line_end = line_ranges[end_line_no]
                if span_end < line_start:
                    upper_bound = end_line_no - 1
                elif span_end > line_end:
                    lower_bound = end_line_no + 1
                else:
                    break
                end_line_no = (lower_bound + upper_bound) // 2

        # Re-base the span on to each line it crosses.
        for line_no in range(start_line_no, end_line_no + 1):
            line_start, line_end = line_ranges[line_no]
            new_start = max(0, span_start - line_start)
            new_end = min(span_end - line_start, line_end - line_start)
            if new_end > new_start:
                _line_appends[line_no](_Span(new_start, new_end, style))

    return new_lines
def right_crop(self, amount: int = 1) -> None:
    """Remove a number of characters from the end of the text.

    Args:
        amount (int, optional): Number of characters to remove. Defaults to 1.
    """
    if not amount:
        # Guard: with amount == 0 the slice below would be plain[:-0] ==
        # plain[:0] == "", erasing the entire text while leaving _length
        # unchanged and corrupting the instance. Zero is a no-op.
        return
    max_offset = len(self.plain) - amount
    _Span = Span
    # Drop spans entirely inside the cropped region; clip spans that overlap it.
    self._spans[:] = [
        (
            span
            if span.end < max_offset
            else _Span(span.start, min(max_offset, span.end), span.style)
        )
        for span in self._spans
        if span.start < max_offset
    ]
    self._text = [self.plain[:-amount]]
    self._length -= amount
def wrap(
    self,
    console: "Console",
    width: int,
    *,
    justify: Optional["JustifyMethod"] = None,
    overflow: Optional["OverflowMethod"] = None,
    tab_size: int = 8,
    no_wrap: Optional[bool] = None,
) -> Lines:
    """Word wrap the text.

    Args:
        console (Console): Console instance.
        width (int): Number of cells available per line.
        justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
        overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
        tab_size (int, optional): Default tab size. Defaults to 8.
        no_wrap (bool, optional): Disable wrapping, Defaults to False.

    Returns:
        Lines: Number of lines.
    """
    # Arguments win over instance settings, which win over module defaults.
    wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
    wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW

    # "ignore" overflow implies no wrapping at all.
    no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"

    lines = Lines()
    for line in self.split(allow_blank=True):
        if "\t" in line:
            line.expand_tabs(tab_size)
        if no_wrap:
            new_lines = Lines([line])
        else:
            # divide_line finds break offsets; divide preserves spans.
            offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
            new_lines = line.divide(offsets)
        for line in new_lines:
            line.rstrip_end(width)
        if wrap_justify:
            new_lines.justify(
                console, width, justify=wrap_justify, overflow=wrap_overflow
            )
        for line in new_lines:
            line.truncate(width, overflow=wrap_overflow)
        lines.extend(new_lines)
    return lines
def fit(self, width: int) -> Lines:
    """Fit the text in to given width by chopping in to lines.

    Args:
        width (int): Maximum characters in a line.

    Returns:
        Lines: Lines container.
    """
    fitted = Lines()
    # Force every line to exactly `width` cells: pad short ones, crop long ones.
    for line in self.split():
        line.set_length(width)
        fitted.append(line)
    return fitted
def detect_indentation(self) -> int:
    """Auto-detect indentation of code.

    Returns:
        int: Number of spaces used to indent code.
    """
    # Collect the width of the leading-space run on every line.
    indent_sizes = {
        len(match.group(1))
        for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
    }
    # The GCD of the even indent widths is the most likely indent unit.
    even_indents = [size for size in indent_sizes if not size % 2]
    try:
        return reduce(gcd, even_indents) or 1
    except TypeError:
        # reduce() raises TypeError on an empty sequence with no initializer.
        return 1
def with_indent_guides(
    self,
    indent_size: Optional[int] = None,
    *,
    character: str = "│",
    style: StyleType = "dim green",
) -> "Text":
    """Adds indent guide lines to text.

    Args:
        indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
        character (str, optional): Character to use for indentation. Defaults to "│".
        style (Union[Style, str], optional): Style of indent guides.

    Returns:
        Text: New text with indentation guides.
    """
    _indent_size = self.detect_indentation() if indent_size is None else indent_size

    text = self.copy()
    # Guides assume space indentation, so expand tabs first.
    text.expand_tabs()
    indent_line = f"{character}{' ' * (_indent_size - 1)}"

    re_indent = re.compile(r"^( *)(.*)$")
    new_lines: List[Text] = []
    add_line = new_lines.append
    # Blank lines are buffered and emitted with the indent of the next
    # non-blank line, so guides run continuously through empty lines.
    blank_lines = 0
    for line in text.split(allow_blank=True):
        match = re_indent.match(line.plain)
        if not match or not match.group(2):
            blank_lines += 1
            continue
        indent = match.group(1)
        # One guide per full indent level; leftover spaces stay plain.
        full_indents, remaining_space = divmod(len(indent), _indent_size)
        new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
        line.plain = new_indent + line.plain[len(new_indent) :]
        line.stylize(style, 0, len(new_indent))
        if blank_lines:
            new_lines.extend([Text(new_indent, style=style)] * blank_lines)
            blank_lines = 0
        add_line(line)
    if blank_lines:
        # Trailing blank lines get no guides, just empty styled lines.
        new_lines.extend([Text("", style=style)] * blank_lines)

    new_text = text.blank_copy("\n").join(new_lines)
    return new_text
if __name__ == "__main__":  # pragma: no cover
    # Demo: print styled lorem-ipsum text in each of the four justify modes.
    from pipenv.patched.pip._vendor.rich.console import Console

    text = Text(
        """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
    )
    text.highlight_words(["Lorem"], "bold")
    text.highlight_words(["ipsum"], "italic")

    console = Console()

    console.rule("justify='left'")
    console.print(text, style="red")
    console.print()

    console.rule("justify='center'")
    console.print(text, style="green", justify="center")
    console.print()

    console.rule("justify='right'")
    console.print(text, style="blue", justify="right")
    console.print()

    console.rule("justify='full'")
    console.print(text, style="magenta", justify="full")
    console.print()
| Text |
python | walkccc__LeetCode | solutions/3512. Minimum Operations to Make Array Sum Divisible by K/3512.py | {
"start": 0,
"end": 100
} | class ____:
def minOperations(self, nums: list[int], k: int) -> int:
    """Return the minimum number of unit decrements to make the array sum divisible by k."""
    # Each operation lowers the total by exactly 1, so only the remainder matters.
    total = sum(nums)
    return total % k
| Solution |
python | apache__airflow | providers/dbt/cloud/tests/unit/dbt/cloud/operators/test_dbt.py | {
"start": 3796,
"end": 27275
} | class ____:
def setup_method(self):
    """Create a fresh DAG, task-instance context, and baseline operator kwargs per test."""
    self.dag = DAG("test_dbt_cloud_job_run_op", schedule=None, start_date=DEFAULT_DATE)
    self.mock_ti = MagicMock()
    self.mock_context = {"ti": self.mock_ti}
    # Shared operator keyword arguments used by the parametrised tests below.
    self.config = {
        "job_id": JOB_ID,
        "check_interval": 1,
        "timeout": 3,
        "steps_override": ["dbt run --select my_first_dbt_model"],
        "schema_override": "another_schema",
        "additional_run_config": {"threads_override": 8},
    }
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_status",
    return_value=DbtCloudJobRunStatus.SUCCESS.value,
)
@patch("airflow.providers.dbt.cloud.operators.dbt.DbtCloudRunJobOperator.defer")
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_connection")
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.trigger_job_run")
def test_execute_succeeded_before_getting_deferred(
    self, mock_trigger_job_run, mock_dbt_hook, mock_defer, mock_job_run_status
):
    """A deferrable run whose status is already SUCCESS should finish without deferring."""
    dbt_op = DbtCloudRunJobOperator(
        dbt_cloud_conn_id=ACCOUNT_ID_CONN,
        task_id=TASK_ID,
        job_id=JOB_ID,
        check_interval=1,
        timeout=3,
        dag=self.dag,
        deferrable=True,
    )
    dbt_op.execute(MagicMock())
    # Terminal status was observed synchronously, so no trigger was scheduled.
    assert not mock_defer.called
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_status",
    return_value=DbtCloudJobRunStatus.ERROR.value,
)
@patch("airflow.providers.dbt.cloud.operators.dbt.DbtCloudRunJobOperator.defer")
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_connection")
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.trigger_job_run",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RUN_RESPONSE),
)
def test_execute_failed_before_getting_deferred(
    self, mock_trigger_job_run, mock_dbt_hook, mock_defer, mock_job_run_status
):
    """A deferrable run whose status is already ERROR should raise and never defer."""
    dbt_op = DbtCloudRunJobOperator(
        dbt_cloud_conn_id=ACCOUNT_ID_CONN,
        task_id=TASK_ID,
        job_id=JOB_ID,
        check_interval=1,
        timeout=3,
        dag=self.dag,
        deferrable=True,
    )
    # The terminal failure surfaces as an exception in the worker.
    with pytest.raises(DbtCloudJobRunException):
        dbt_op.execute(MagicMock())
    # Failure was detected synchronously, so no trigger was scheduled.
    assert not mock_defer.called
@pytest.mark.parametrize(
    "status",
    (
        DbtCloudJobRunStatus.QUEUED.value,
        DbtCloudJobRunStatus.STARTING.value,
        DbtCloudJobRunStatus.RUNNING.value,
    ),
)
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_status",
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_connection")
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.trigger_job_run",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RUN_RESPONSE),
)
def test_dbt_run_job_op_async(self, mock_trigger_job_run, mock_dbt_hook, mock_job_run_status, status):
    """
    Asserts that a task is deferred and an DbtCloudRunJobTrigger will be fired
    when the DbtCloudRunJobOperator has deferrable param set to True
    """
    # Parametrised with each non-terminal status: all of them must defer.
    mock_job_run_status.return_value = status
    dbt_op = DbtCloudRunJobOperator(
        dbt_cloud_conn_id=ACCOUNT_ID_CONN,
        task_id=TASK_ID,
        job_id=JOB_ID,
        check_interval=1,
        timeout=3,
        dag=self.dag,
        deferrable=True,
    )
    # Deferral is signalled by raising TaskDeferred with the trigger attached.
    with pytest.raises(TaskDeferred) as exc:
        dbt_op.execute(MagicMock())
    assert isinstance(exc.value.trigger, DbtCloudRunJobTrigger), "Trigger is not a DbtCloudRunJobTrigger"
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_by_name",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RESPONSE),
)
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_status",
    return_value=DbtCloudJobRunStatus.SUCCESS.value,
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_connection")
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.trigger_job_run",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RUN_RESPONSE),
)
def test_dbt_run_job_by_name(
    self, mock_trigger_job_run, mock_dbt_hook, mock_job_run_status, mock_job_by_name
):
    """
    Test alternative way to run a job by project,
    environment and job name instead of job id.
    """
    # No job_id supplied: the operator must resolve it from the name triple.
    dbt_op = DbtCloudRunJobOperator(
        dbt_cloud_conn_id=ACCOUNT_ID_CONN,
        task_id=TASK_ID,
        project_name=PROJECT_NAME,
        environment_name=ENVIRONMENT_NAME,
        job_name=JOB_NAME,
        check_interval=1,
        timeout=3,
        dag=self.dag,
    )
    dbt_op.execute(MagicMock())
    # Resolution succeeded, so exactly one job run was triggered.
    mock_trigger_job_run.assert_called_once()
@pytest.mark.parametrize(
    argnames=("project_name", "environment_name", "job_name"),
    argvalues=[
        # Each case is missing (None or empty) at least one of the three names.
        (None, ENVIRONMENT_NAME, JOB_NAME),
        (PROJECT_NAME, "", JOB_NAME),
        (PROJECT_NAME, ENVIRONMENT_NAME, None),
        ("", "", ""),
    ],
)
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_by_name",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RESPONSE),
)
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_status",
    return_value=DbtCloudJobRunStatus.SUCCESS.value,
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_connection")
@patch(
    "airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.trigger_job_run",
    return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RUN_RESPONSE),
)
def test_dbt_run_job_by_incorrect_name_raises_exception(
    self,
    mock_trigger_job_run,
    mock_dbt_hook,
    mock_job_run_status,
    mock_job_by_name,
    project_name,
    environment_name,
    job_name,
):
    """
    Test alternative way to run a job by project,
    environment and job name instead of job id.
    This test is to check if the operator raises an exception
    when the project, environment or job name is missing.
    """
    dbt_op = DbtCloudRunJobOperator(
        dbt_cloud_conn_id=ACCOUNT_ID_CONN,
        task_id=TASK_ID,
        project_name=project_name,
        environment_name=environment_name,
        job_name=job_name,
        check_interval=1,
        timeout=3,
        dag=self.dag,
    )
    # An incomplete name triple (and no job_id) must fail validation...
    with pytest.raises(
        ValueError,
        match="Either job_id or project_name, environment_name, and job_name must be provided.",
    ):
        dbt_op.execute(MagicMock())
    # ...before any API call is made.
    mock_trigger_job_run.assert_not_called()
@patch.object(
DbtCloudHook, "trigger_job_run", return_value=mock_response_json(DEFAULT_ACCOUNT_JOB_RUN_RESPONSE)
)
@pytest.mark.parametrize(
("job_run_status", "expected_output"),
[
(DbtCloudJobRunStatus.SUCCESS.value, "success"),
(DbtCloudJobRunStatus.ERROR.value, "exception"),
(DbtCloudJobRunStatus.CANCELLED.value, "exception"),
(DbtCloudJobRunStatus.RUNNING.value, "timeout"),
(DbtCloudJobRunStatus.QUEUED.value, "timeout"),
(DbtCloudJobRunStatus.STARTING.value, "timeout"),
],
)
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_wait_for_termination(
self, mock_run_job, conn_id, account_id, job_run_status, expected_output, time_machine
):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID, dbt_cloud_conn_id=conn_id, account_id=account_id, dag=self.dag, **self.config
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert operator.wait_for_termination
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
# Freeze time for avoid real clock side effects
time_machine.move_to(timezone.datetime(1970, 1, 1), tick=False)
def fake_sleep(seconds):
# Shift frozen time every time we call a ``time.sleep`` during this test case.
# Because we freeze a time, we also need to add a small shift
# which is emulating time which we spent in a loop
overall_delta = timedelta(seconds=seconds) + timedelta(microseconds=42)
time_machine.shift(overall_delta)
with (
patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run,
patch("airflow.providers.dbt.cloud.hooks.dbt.time.sleep", side_effect=fake_sleep),
):
mock_get_job_run.return_value.json.return_value = {
"data": {"status": job_run_status, "id": RUN_ID}
}
if expected_output == "success":
operator.execute(context=self.mock_context)
assert mock_run_job.return_value.data["id"] == RUN_ID
elif expected_output == "exception":
# The operator should fail if the job run fails or is cancelled.
error_message = r"has failed or has been cancelled\.$"
with pytest.raises(DbtCloudJobRunException, match=error_message):
operator.execute(context=self.mock_context)
else:
# Demonstrating the operator timing out after surpassing the configured timeout value.
timeout = self.config["timeout"]
error_message = rf"has not reached a terminal status after {timeout} seconds\.$"
with pytest.raises(DbtCloudJobRunException, match=error_message):
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
steps_override=self.config["steps_override"],
schema_override=self.config["schema_override"],
retry_from_failure=False,
additional_run_config=self.config["additional_run_config"],
)
if job_run_status in DbtCloudJobRunStatus.TERMINAL_STATUSES.value:
assert mock_get_job_run.call_count == 1
else:
# When the job run status is not in a terminal status or "Success", the operator will
# continue to call ``get_job_run()`` until a ``timeout`` number of seconds has passed
assert mock_get_job_run.call_count >= 1
# To make it more dynamic, try and calculate number of calls
max_number_of_calls = timeout // self.config["check_interval"] + 1
assert mock_get_job_run.call_count <= max_number_of_calls
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_no_wait_for_termination(self, mock_run_job, conn_id, account_id):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
wait_for_termination=False,
**self.config,
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert not operator.wait_for_termination
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
steps_override=self.config["steps_override"],
schema_override=self.config["schema_override"],
retry_from_failure=False,
additional_run_config=self.config["additional_run_config"],
)
mock_get_job_run.assert_not_called()
@patch.object(DbtCloudHook, "get_job_runs")
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_no_wait_for_termination_and_reuse_existing_run(
self, mock_run_job, mock_get_job_runs, conn_id, account_id
):
mock_get_job_runs.return_value.json.return_value = {
"data": [
{
"id": 10000,
"status": 1,
"href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=DEFAULT_ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
),
},
{
"id": 10001,
"status": 2,
"href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=DEFAULT_ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
),
},
]
}
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
wait_for_termination=False,
reuse_existing_run=True,
**self.config,
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert not operator.wait_for_termination
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
operator.execute(context=self.mock_context)
mock_run_job.assert_not_called()
mock_get_job_runs.assert_called_with(
account_id=account_id,
payload={
"job_definition_id": self.config["job_id"],
"status__in": str(list(DbtCloudJobRunStatus.NON_TERMINAL_STATUSES.value)),
"order_by": "-created_at",
},
)
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_retry_from_failure(self, mock_run_job, conn_id, account_id):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
retry_from_failure=True,
**self.config,
)
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert operator.retry_from_failure
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
steps_override=self.config["steps_override"],
schema_override=self.config["schema_override"],
retry_from_failure=True,
additional_run_config=self.config["additional_run_config"],
)
@patch.object(DbtCloudHook, "_run_and_get_response")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_retry_from_failure_run(self, mock_run_req, conn_id, account_id):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
retry_from_failure=True,
**self.config,
)
self.mock_context["ti"].try_number = 1
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert operator.retry_from_failure
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
operator.execute(context=self.mock_context)
mock_run_req.assert_called()
@patch.object(
DbtCloudHook, "_run_and_get_response", return_value=mock_response_json(JOB_RUN_ERROR_RESPONSE)
)
@patch.object(DbtCloudHook, "retry_failed_job_run")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_execute_retry_from_failure_rerun(self, mock_run_req, mock_rerun_req, conn_id, account_id):
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=None,
dag=self.dag,
retry_from_failure=True,
**self.config,
)
self.mock_context["ti"].try_number = 2
assert operator.dbt_cloud_conn_id == conn_id
assert operator.job_id == self.config["job_id"]
assert operator.account_id == account_id
assert operator.check_interval == self.config["check_interval"]
assert operator.timeout == self.config["timeout"]
assert operator.retry_from_failure
assert operator.steps_override == self.config["steps_override"]
assert operator.schema_override == self.config["schema_override"]
assert operator.additional_run_config == self.config["additional_run_config"]
operator.execute(context=self.mock_context)
mock_rerun_req.assert_called_once()
@patch.object(DbtCloudHook, "trigger_job_run")
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
def test_custom_trigger_reason(self, mock_run_job, conn_id, account_id):
custom_trigger_reason = "Some other trigger reason."
operator = DbtCloudRunJobOperator(
task_id=TASK_ID,
dbt_cloud_conn_id=conn_id,
account_id=account_id,
trigger_reason=custom_trigger_reason,
dag=self.dag,
**self.config,
)
assert operator.trigger_reason == custom_trigger_reason
with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
mock_get_job_run.return_value.json.return_value = {
"data": {"status": DbtCloudJobRunStatus.SUCCESS.value, "id": RUN_ID}
}
operator.execute(context=self.mock_context)
mock_run_job.assert_called_once_with(
account_id=account_id,
job_id=JOB_ID,
cause=custom_trigger_reason,
steps_override=self.config["steps_override"],
schema_override=self.config["schema_override"],
retry_from_failure=False,
additional_run_config=self.config["additional_run_config"],
)
@pytest.mark.parametrize(
("conn_id", "account_id"),
[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
ids=["default_account", "explicit_account"],
)
@pytest.mark.db_test
def test_run_job_operator_link(
self, conn_id, account_id, create_task_instance_of_operator, request, mock_supervisor_comms
):
ti = create_task_instance_of_operator(
DbtCloudRunJobOperator,
dag_id="test_dbt_cloud_run_job_op_link",
task_id="trigger_dbt_cloud_job",
dbt_cloud_conn_id=conn_id,
job_id=JOB_ID,
account_id=account_id,
)
if request.node.callspec.id == "default_account":
_run_response = DEFAULT_ACCOUNT_JOB_RUN_RESPONSE
else:
_run_response = EXPLICIT_ACCOUNT_JOB_RUN_RESPONSE
ti.xcom_push(key="job_run_url", value=_run_response["data"]["href"])
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="job_run_url",
value=EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=account_id or DEFAULT_ACCOUNT_ID,
project_id=PROJECT_ID,
run_id=_run_response["data"]["id"],
),
)
url = ti.task.operator_extra_links[0].get_link(operator=ti.task, ti_key=ti.key)
assert url == (
EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
account_id=account_id or DEFAULT_ACCOUNT_ID,
project_id=PROJECT_ID,
run_id=_run_response["data"]["id"],
)
)
| TestDbtCloudRunJobOperator |
python | django__django | tests/template_tests/filter_tests/test_default.py | {
"start": 165,
"end": 1339
} | class ____(SimpleTestCase):
"""
Literal string arguments to the default filter are always treated as
safe strings, regardless of the auto-escaping state.
Note: we have to use {"a": ""} here, otherwise the invalid template
variable string interferes with the test result.
"""
@setup({"default01": '{{ a|default:"x<" }}'})
def test_default01(self):
output = self.engine.render_to_string("default01", {"a": ""})
self.assertEqual(output, "x<")
@setup({"default02": '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'})
def test_default02(self):
output = self.engine.render_to_string("default02", {"a": ""})
self.assertEqual(output, "x<")
@setup({"default03": '{{ a|default:"x<" }}'})
def test_default03(self):
output = self.engine.render_to_string("default03", {"a": mark_safe("x>")})
self.assertEqual(output, "x>")
@setup({"default04": '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'})
def test_default04(self):
output = self.engine.render_to_string("default04", {"a": mark_safe("x>")})
self.assertEqual(output, "x>")
| DefaultTests |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {
"start": 558,
"end": 868
} | class ____(NotificationData):
source = "error-alert-service"
error_type: str
error_message: str
project_name: str
issue_id: str
error_count: int
first_seen: str
chart_url: str
issue_url: str
assign_url: str
@template_registry.register(ErrorAlertData.source)
| ErrorAlertData |
python | huggingface__transformers | tests/models/rembert/test_modeling_rembert.py | {
"start": 13362,
"end": 17253
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RemBertModel,
RemBertForMaskedLM,
RemBertForCausalLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": RemBertModel,
"fill-mask": RemBertForMaskedLM,
"question-answering": RemBertForQuestionAnswering,
"text-classification": RemBertForSequenceClassification,
"text-generation": RemBertForCausalLM,
"token-classification": RemBertForTokenClassification,
"zero-shot": RemBertForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = RemBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=RemBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
@slow
def test_model_from_pretrained(self):
model_name = "google/rembert"
model = RemBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| RemBertModelTest |
python | walkccc__LeetCode | solutions/2761. Prime Pairs With Target Sum/2761.py | {
"start": 0,
"end": 491
} | class ____:
def findPrimePairs(self, n: int) -> list[list[int]]:
isPrime = self._sieveEratosthenes(n + 1)
return [[i, n - i] for i in range(2, n // 2 + 1)
if isPrime[i] and isPrime[n - i]]
def _sieveEratosthenes(self, n: int) -> list[bool]:
isPrime = [True] * n
isPrime[0] = False
isPrime[1] = False
for i in range(2, int(n**0.5) + 1):
if isPrime[i]:
for j in range(i * i, n, i):
isPrime[j] = False
return isPrime
j
| Solution |
python | django__django | tests/auth_tests/models/with_unique_constraint.py | {
"start": 104,
"end": 355
} | class ____(BaseUserManager):
def create_superuser(self, username, password):
user = self.model(username=username)
user.set_password(password)
user.save(using=self._db)
return user
| CustomUserWithUniqueConstraintManager |
python | fsspec__filesystem_spec | fsspec/implementations/cached.py | {
"start": 961,
"end": 1326
} | class ____(Transaction):
def complete(self, commit=True):
rpaths = [f.path for f in self.files]
lpaths = [f.fn for f in self.files]
if commit:
self.fs.put(lpaths, rpaths)
self.files.clear()
self.fs._intrans = False
self.fs._transaction = None
self.fs = None # break cycle
| WriteCachedTransaction |
python | astropy__astropy | astropy/time/formats.py | {
"start": 22268,
"end": 25732
} | class ____(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight of the first
day of each year.
The fractional part represents the exact fraction of the year, considering the
precise number of days in the year (365 or 366). The following example shows
essentially how the decimal year is computed::
>>> from astropy.time import Time
>>> tm = Time("2024-04-05T12:34:00")
>>> tm0 = Time("2024-01-01T00:00:00")
>>> tm1 = Time("2025-01-01T00:00:00")
>>> print(2024 + (tm.jd - tm0.jd) / (tm1.jd - tm0.jd)) # doctest: +FLOAT_CMP
2024.2609934729812
>>> print(tm.decimalyear) # doctest: +FLOAT_CMP
2024.2609934729812
Since for this format the length of the year varies between 365 and 366 days, it is
not possible to use Quantity input, in which a year is always 365.25 days.
This format is convenient for low-precision applications or for plotting data.
"""
name = "decimalyear"
def _check_val_type(self, val1, val2):
_check_val_type_not_quantity(self.name, val1, val2)
# if val2 is a Quantity, super() will raise a TypeError.
return super()._check_val_type(val1, val2)
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format="jd")
t_end = Time(jd1_end, jd2_end, scale=self.scale, format="jd")
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode("ascii")
# precision=0
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, self.jd1, self.jd2)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
| TimeDecimalYear |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_queried_column_pair_values_to_be_both_filled_or_null.py | {
"start": 485,
"end": 6137
} | class ____(QueryExpectation):
"""Expect the values of a pair of columns to be either both filled or empty simultaneously.
It checks if 2 columns are aligned - the values of each row need to either be both empty or filled.
The expectation will fail if there's at least one row where one column is filled and the other isn't.
Args:
template_dict: dict with the following keys: \
column_a (str): first column name, to compare values against column_b
column_b (str): second column name, to compare values against column_a
Returns:
None
"""
metric_dependencies = ("query.template_values",)
query = """
SELECT
COUNT(1)
FROM
{batch}
WHERE
({column_a} is not null and {column_b} is null)
OR
({column_a} is null and {column_b} is not null)
"""
success_keys = (
"template_dict",
"query",
)
domain_keys = ("batch_id", "row_condition", "condition_parser")
default_kwarg_values = {
"result_format": "BASIC",
"catch_exceptions": False,
"meta": None,
"query": query,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
template_dict = configuration.kwargs.get("template_dict")
try:
assert isinstance(template_dict, dict), "'template_dict' must be supplied as a dict"
assert all(
[
"column_a" in template_dict,
"column_b" in template_dict,
]
), "The following keys must be in the template dict: column_a, column_b"
assert isinstance(template_dict["column_a"], str), "column_a must be a string"
assert isinstance(template_dict["column_b"], str), "column_b must be a string"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
def _validate(
self,
metrics: dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
) -> Union[ExpectationValidationResult, dict]:
metrics = convert_to_json_serializable(data=metrics)
try:
num_of_inconsistent_rows = list(metrics.get("query.template_values")[0].values())[0]
except IndexError:
raise IndexError("Invalid index - query.template_values has no [0] index]") # noqa: TRY003
is_success = not num_of_inconsistent_rows or num_of_inconsistent_rows == 0
return {
"success": is_success,
"result": {"info": f"Row count with inconsistent values: {num_of_inconsistent_rows}"},
}
examples = [
{
"data": [
{
"data": {
"col1": [1, 2, 2, 3, 4],
"col2": [5, 0, 3, 1, 4],
"col3": ["a", "b", "c", "d", "e"],
"col4": [None, 6, 7, 9, 6],
},
},
],
"tests": [
{
"title": "basic_positive_test_same_type",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"template_dict": {"column_a": "col1", "column_b": "col2"}},
"out": {"success": True},
"only_for": ["sqlite"],
},
{
"title": "basic_negative_test_same_type",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"template_dict": {"column_a": "col1", "column_b": "col4"}},
"out": {"success": False},
"only_for": ["sqlite"],
},
{
"title": "basic_positive_test_different_type",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"template_dict": {"column_a": "col2", "column_b": "col3"}},
"out": {"success": True},
"only_for": ["sqlite"],
},
{
"title": "basic_negative_test_different_type",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"template_dict": {"column_a": "col3", "column_b": "col4"}},
"out": {"success": False},
"only_for": ["sqlite"],
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["query-based"],
"contributors": ["@eden-o"],
}
if __name__ == "__main__":
ExpectQueriedColumnPairValuesToBeBothFilledOrNull().print_diagnostic_checklist()
| ExpectQueriedColumnPairValuesToBeBothFilledOrNull |
python | numpy__numpy | numpy/lib/tests/test_nanfunctions.py | {
"start": 1572,
"end": 3250
} | class ____:
NANFUNCS = {
np.nanmin: np.amin,
np.nanmax: np.amax,
np.nanargmin: np.argmin,
np.nanargmax: np.argmax,
np.nansum: np.sum,
np.nanprod: np.prod,
np.nancumsum: np.cumsum,
np.nancumprod: np.cumprod,
np.nanmean: np.mean,
np.nanmedian: np.median,
np.nanpercentile: np.percentile,
np.nanquantile: np.quantile,
np.nanvar: np.var,
np.nanstd: np.std,
}
IDS = [k.__name__ for k in NANFUNCS]
@staticmethod
def get_signature(func, default="..."):
"""Construct a signature and replace all default parameter-values."""
prm_list = []
signature = inspect.signature(func)
for prm in signature.parameters.values():
if prm.default is inspect.Parameter.empty:
prm_list.append(prm)
else:
prm_list.append(prm.replace(default=default))
return inspect.Signature(prm_list)
@pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS)
def test_signature_match(self, nan_func, func):
# Ignore the default parameter-values as they can sometimes differ
# between the two functions (*e.g.* one has `False` while the other
# has `np._NoValue`)
signature = self.get_signature(func)
nan_signature = self.get_signature(nan_func)
np.testing.assert_equal(signature, nan_signature)
def test_exhaustiveness(self):
"""Validate that all nan functions are actually tested."""
np.testing.assert_equal(
set(self.IDS), set(np.lib._nanfunctions_impl.__all__)
)
| TestSignatureMatch |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/freshness.py | {
"start": 11009,
"end": 12520
} | class ____(LoadableBy[AssetKey]):
entity_key: AssetKey
freshness_state: FreshnessState
updated_at: datetime
record_body: FreshnessStateRecordBody
@staticmethod
def from_db_row(db_row):
return FreshnessStateRecord(
entity_key=check.not_none(AssetKey.from_db_string(db_row.entity_key)),
freshness_state=FreshnessState(db_row.freshness_state),
record_body=deserialize_value(db_row.record_body, FreshnessStateRecordBody),
updated_at=db_row.update_timestamp.replace(tzinfo=timezone.utc),
)
@staticmethod
def from_event_log_entry(entry: "EventLogEntry") -> "FreshnessStateRecord":
dagster_event = check.not_none(entry.dagster_event)
event_data = check.inst(dagster_event.event_specific_data, FreshnessStateChange)
return FreshnessStateRecord(
entity_key=event_data.key,
freshness_state=event_data.new_state,
updated_at=datetime_from_timestamp(event_data.state_change_timestamp),
# note: metadata is not currently stored in the event log
record_body=FreshnessStateRecordBody(metadata={}),
)
@classmethod
def _blocking_batch_load(
cls, keys: Iterable[AssetKey], context: LoadingContext
) -> Iterable[Optional["FreshnessStateRecord"]]:
keys = list(keys)
state_records = context.instance.get_freshness_state_records(keys)
return [state_records.get(key) for key in keys]
| FreshnessStateRecord |
python | openai__openai-python | src/openai/resources/responses/responses.py | {
"start": 2369,
"end": 78774
} | class ____(SyncAPIResource):
@cached_property
def input_items(self) -> InputItems:
return InputItems(self._client)
@cached_property
def input_tokens(self) -> InputTokens:
return InputTokens(self._client)
@cached_property
def with_raw_response(self) -> ResponsesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ResponsesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ResponsesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ResponsesWithStreamingResponse(self)
    @overload
    # Overload: non-streaming call (`stream` omitted or False) returns a parsed `Response`.
    def create(
        self,
        *,
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response:
        """Creates a model response.
        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.
        Args:
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).
          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.
          include: Specify additional output data to include in the model response. Currently
              supported values are:
              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).
          input: Text, image, or file inputs to the model, used to generate a response.
              Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
          instructions: A system (or developer) message inserted into the model's context.
              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.
          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.
          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
          reasoning: **gpt-5 and o-series models only**
              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          service_tier: Specifies the processing type used for serving the request.
              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.
              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.
          store: Whether to store the generated model response for later retrieval via API.
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.
          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.
          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.
          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.
              We support the following categories of tools:
              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.
          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.
              We generally recommend altering this or `temperature` but not both.
          truncation: The truncation strategy to use for the model response.
              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.
          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Overload: `stream=True` (required keyword) returns a `Stream` of server-sent events.
    def create(
        self,
        *,
        stream: Literal[True],
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ResponseStreamEvent]:
        """Creates a model response.
        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.
        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).
          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.
          include: Specify additional output data to include in the model response. Currently
              supported values are:
              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).
          input: Text, image, or file inputs to the model, used to generate a response.
              Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
          instructions: A system (or developer) message inserted into the model's context.
              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.
          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.
          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
          reasoning: **gpt-5 and o-series models only**
              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          service_tier: Specifies the processing type used for serving the request.
              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.
              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.
          store: Whether to store the generated model response for later retrieval via API.
          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.
          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.
          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.
              We support the following categories of tools:
              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.
          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.
              We generally recommend altering this or `temperature` but not both.
          truncation: The truncation strategy to use for the model response.
              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.
          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Overload: `stream` passed as a runtime-determined bool — result is either a
    # parsed `Response` or a `Stream`, so callers must narrow the union themselves.
    def create(
        self,
        *,
        stream: bool,
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response | Stream[ResponseStreamEvent]:
        """Creates a model response.
        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.
        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).
          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.
          include: Specify additional output data to include in the model response. Currently
              supported values are:
              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).
          input: Text, image, or file inputs to the model, used to generate a response.
              Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
          instructions: A system (or developer) message inserted into the model's context.
              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.
          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.
              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.
          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.
          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
          reasoning: **gpt-5 and o-series models only**
              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          service_tier: Specifies the processing type used for serving the request.
              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.
              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.
          store: Whether to store the generated model response for later retrieval via API.
          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.
          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:
              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.
          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.
              We support the following categories of tools:
              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.
          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.
              We generally recommend altering this or `temperature` but not both.
          truncation: The truncation strategy to use for the model response.
              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.
          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
def create(
self,
*,
background: Optional[bool] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
instructions: Optional[str] | Omit = omit,
max_output_tokens: Optional[int] | Omit = omit,
max_tool_calls: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: ResponsesModel | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
prompt: Optional[ResponsePromptParam] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
safety_identifier: str | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: ResponseTextConfigParam | Omit = omit,
tool_choice: response_create_params.ToolChoice | Omit = omit,
tools: Iterable[ToolParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Response | Stream[ResponseStreamEvent]:
return self._post(
"/responses",
body=maybe_transform(
{
"background": background,
"conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
"max_tool_calls": max_tool_calls,
"metadata": metadata,
"model": model,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"prompt": prompt,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning": reasoning,
"safety_identifier": safety_identifier,
"service_tier": service_tier,
"store": store,
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"truncation": truncation,
"user": user,
},
response_create_params.ResponseCreateParamsStreaming
if stream
else response_create_params.ResponseCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Response,
stream=stream or False,
stream_cls=Stream[ResponseStreamEvent],
)
@overload
def stream(
self,
*,
response_id: str,
text_format: type[TextFormatT] | Omit = omit,
starting_after: int | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ResponseStreamManager[TextFormatT]: ...
@overload
def stream(
self,
*,
input: Union[str, ResponseInputParam],
model: ResponsesModel,
background: Optional[bool] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
instructions: Optional[str] | Omit = omit,
max_output_tokens: Optional[int] | Omit = omit,
max_tool_calls: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
prompt: Optional[ResponsePromptParam] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
safety_identifier: str | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: ResponseTextConfigParam | Omit = omit,
tool_choice: response_create_params.ToolChoice | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ResponseStreamManager[TextFormatT]: ...
def stream(
self,
*,
response_id: str | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
model: ResponsesModel | Omit = omit,
background: Optional[bool] | Omit = omit,
text_format: type[TextFormatT] | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
instructions: Optional[str] | Omit = omit,
max_output_tokens: Optional[int] | Omit = omit,
max_tool_calls: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
prompt: Optional[ResponsePromptParam] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
safety_identifier: str | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: ResponseTextConfigParam | Omit = omit,
tool_choice: response_create_params.ToolChoice | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
user: str | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ResponseStreamManager[TextFormatT]:
new_response_args = {
"input": input,
"model": model,
"conversation": conversation,
"include": include,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
"max_tool_calls": max_tool_calls,
"metadata": metadata,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"prompt": prompt,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning": reasoning,
"safety_identifier": safety_identifier,
"service_tier": service_tier,
"store": store,
"stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
"top_logprobs": top_logprobs,
"top_p": top_p,
"truncation": truncation,
"user": user,
"background": background,
}
new_response_args_names = [k for k, v in new_response_args.items() if is_given(v)]
if (is_given(response_id) or is_given(starting_after)) and len(new_response_args_names) > 0:
raise ValueError(
"Cannot provide both response_id/starting_after can't be provided together with "
+ ", ".join(new_response_args_names)
)
tools = _make_tools(tools)
if len(new_response_args_names) > 0:
if not is_given(input):
raise ValueError("input must be provided when creating a new response")
if not is_given(model):
raise ValueError("model must be provided when creating a new response")
if is_given(text_format):
if not text:
text = {}
if "format" in text:
raise TypeError("Cannot mix and match text.format with text_format")
text["format"] = _type_to_text_format_param(text_format)
api_request: partial[Stream[ResponseStreamEvent]] = partial(
self.create,
input=input,
model=model,
tools=tools,
conversation=conversation,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
prompt=prompt,
prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
store=store,
stream_options=stream_options,
stream=True,
temperature=temperature,
text=text,
tool_choice=tool_choice,
reasoning=reasoning,
safety_identifier=safety_identifier,
service_tier=service_tier,
top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
background=background,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return ResponseStreamManager(api_request, text_format=text_format, input_tools=tools, starting_after=None)
else:
if not is_given(response_id):
raise ValueError("id must be provided when streaming an existing response")
return ResponseStreamManager(
lambda: self.retrieve(
response_id=response_id,
stream=True,
include=include or [],
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
starting_after=omit,
timeout=timeout,
),
text_format=text_format,
input_tools=tools,
starting_after=starting_after if is_given(starting_after) else None,
)
def parse(
self,
*,
text_format: type[TextFormatT] | Omit = omit,
background: Optional[bool] | Omit = omit,
conversation: Optional[response_create_params.Conversation] | Omit = omit,
include: Optional[List[ResponseIncludable]] | Omit = omit,
input: Union[str, ResponseInputParam] | Omit = omit,
instructions: Optional[str] | Omit = omit,
max_output_tokens: Optional[int] | Omit = omit,
max_tool_calls: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: ResponsesModel | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
prompt: Optional[ResponsePromptParam] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
safety_identifier: str | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: ResponseTextConfigParam | Omit = omit,
tool_choice: response_create_params.ToolChoice | Omit = omit,
tools: Iterable[ParseableToolParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ParsedResponse[TextFormatT]:
if is_given(text_format):
if not text:
text = {}
if "format" in text:
raise TypeError("Cannot mix and match text.format with text_format")
text["format"] = _type_to_text_format_param(text_format)
tools = _make_tools(tools)
def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
return parse_response(
input_tools=tools,
text_format=text_format,
response=raw_response,
)
return self._post(
"/responses",
body=maybe_transform(
{
"background": background,
"conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
"max_tool_calls": max_tool_calls,
"metadata": metadata,
"model": model,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"prompt": prompt,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning": reasoning,
"safety_identifier": safety_identifier,
"service_tier": service_tier,
"store": store,
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"truncation": truncation,
"user": user,
"verbosity": verbosity,
},
response_create_params.ResponseCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
# we turn the `Response` instance into a `ParsedResponse`
# in the `parser` function above
cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
)
@overload
def retrieve(
self,
response_id: str,
*,
include: List[ResponseIncludable] | Omit = omit,
include_obfuscation: bool | Omit = omit,
starting_after: int | Omit = omit,
stream: Literal[False] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Response: ...
@overload
def retrieve(
self,
response_id: str,
*,
stream: Literal[True],
include: List[ResponseIncludable] | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Stream[ResponseStreamEvent]: ...
@overload
def retrieve(
self,
response_id: str,
*,
stream: bool,
include: List[ResponseIncludable] | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Response | Stream[ResponseStreamEvent]: ...
@overload
def retrieve(
self,
response_id: str,
*,
stream: bool = False,
include: List[ResponseIncludable] | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Response | Stream[ResponseStreamEvent]:
"""
Retrieves a model response with the given ID.
Args:
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
characters to an `obfuscation` field on streaming delta events to normalize
payload sizes as a mitigation to certain side-channel attacks. These obfuscation
fields are included by default, but add a small amount of overhead to the data
stream. You can set `include_obfuscation` to false to optimize for bandwidth if
you trust the network links between your application and the OpenAI API.
starting_after: The sequence number of the event after which to start streaming.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def retrieve(
self,
response_id: str,
*,
stream: Literal[True],
include: List[ResponseIncludable] | Omit = omit,
include_obfuscation: bool | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[ResponseStreamEvent]:
"""
Retrieves a model response with the given ID.
Args:
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
characters to an `obfuscation` field on streaming delta events to normalize
payload sizes as a mitigation to certain side-channel attacks. These obfuscation
fields are included by default, but add a small amount of overhead to the data
stream. You can set `include_obfuscation` to false to optimize for bandwidth if
you trust the network links between your application and the OpenAI API.
starting_after: The sequence number of the event after which to start streaming.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def retrieve(
self,
response_id: str,
*,
stream: bool,
include: List[ResponseIncludable] | Omit = omit,
include_obfuscation: bool | Omit = omit,
starting_after: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Response | Stream[ResponseStreamEvent]:
"""
Retrieves a model response with the given ID.
Args:
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
characters to an `obfuscation` field on streaming delta events to normalize
payload sizes as a mitigation to certain side-channel attacks. These obfuscation
fields are included by default, but add a small amount of overhead to the data
stream. You can set `include_obfuscation` to false to optimize for bandwidth if
you trust the network links between your application and the OpenAI API.
starting_after: The sequence number of the event after which to start streaming.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
def retrieve(
self,
response_id: str,
*,
include: List[ResponseIncludable] | Omit = omit,
include_obfuscation: bool | Omit = omit,
starting_after: int | Omit = omit,
stream: Literal[False] | Literal[True] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Response | Stream[ResponseStreamEvent]:
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get(
f"/responses/{response_id}",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"include": include,
"include_obfuscation": include_obfuscation,
"starting_after": starting_after,
"stream": stream,
},
response_retrieve_params.ResponseRetrieveParams,
),
),
cast_to=Response,
stream=stream or False,
stream_cls=Stream[ResponseStreamEvent],
)
def delete(
self,
response_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Deletes a model response with the given ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
f"/responses/{response_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=NoneType,
)
def cancel(
self,
response_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Response:
"""Cancels a model response with the given ID.
Only responses created with the
`background` parameter set to `true` can be cancelled.
[Learn more](https://platform.openai.com/docs/guides/background).
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._post(
f"/responses/{response_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Response,
)
| Responses |
python | google__jax | jax/_src/sharding_impls.py | {
"start": 18075,
"end": 18265
} | class ____(NamedTuple):
"""Represents a pmap mesh (only along the replica axes)."""
nreps: int
names: tuple[Any, ...]
sizes: tuple[int, ...]
@dataclasses.dataclass(frozen=True)
| AxisEnv |
python | walkccc__LeetCode | solutions/3094. Guess the Number Using Bitwise Questions II/3094.py | {
"start": 68,
"end": 305
} | class ____:
def findNumber(self) -> int:
ans = 0
sameCount = commonBits(0)
for i in range(31):
if commonBits(1 << i) > sameCount:
ans |= 1 << i
commonBits(1 << i) # Revert the XOR.
return ans
| Solution |
python | realpython__materials | python-class/crafts.py | {
"start": 0,
"end": 412
} | class ____:
def __init__(self, make, model, color):
self.make = make
self.model = model
self.color = color
def start(self):
print("Starting the engine...")
def stop(self):
print("Stopping the engine...")
def show_technical_specs(self):
print(f"Make: {self.make}")
print(f"Model: {self.model}")
print(f"Color: {self.color}")
| Vehicle |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 21043,
"end": 22866
} | class ____:
def __init__(self, value: int, next_node: typing.Optional["SomeClass"]) -> None:
assert value > 0
self.value = value
self.next_node = next_node
def __repr__(self) -> str:
return f"SomeClass({self.value}, next_node={self.next_node})"
def test_resolving_recursive_type_with_registered_constraint():
with temp_registered(
SomeClass, st.builds(SomeClass, value=st.integers(min_value=1))
):
@given(s=st.from_type(SomeClass))
def test(s):
assert isinstance(s, SomeClass)
test()
def test_resolving_recursive_type_with_registered_constraint_not_none():
with temp_registered(
SomeClass, st.builds(SomeClass, value=st.integers(min_value=1))
):
s = st.from_type(SomeClass)
print(s, s.wrapped_strategy)
find_any(s, lambda s: s.next_node is not None)
@given(from_type(tuple[()]) | from_type(_Tuple[()]))
def test_resolves_empty_Tuple_issue_1583_regression(ex):
# See e.g. https://github.com/python/mypy/commit/71332d58
assert ex == ()
def test_can_register_NewType():
Name = typing.NewType("Name", str)
st.register_type_strategy(Name, st.just("Eric Idle"))
assert_simple_property(st.from_type(Name), lambda x: x == "Eric Idle")
@given(st.from_type(typing.Callable))
def test_resolves_bare_callable_to_function(f):
val = f()
assert val is None
with pytest.raises(TypeError):
f(1)
@given(st.from_type(typing.Callable[[str], int]))
def test_resolves_callable_with_arg_to_function(f):
val = f("1")
assert isinstance(val, int)
@given(st.from_type(typing.Callable[..., int]))
def test_resolves_ellipses_callable_to_function(f):
val = f()
assert isinstance(val, int)
f(1)
f(1, 2, 3)
f(accepts_kwargs_too=1)
| SomeClass |
python | walkccc__LeetCode | solutions/3169. Count Days Without Meetings/3169.py | {
"start": 0,
"end": 304
} | class ____:
def countDays(self, days: int, meetings: list[list[int]]) -> int:
freeDays = 0
prevEnd = 0
for start, end in sorted(meetings):
if start > prevEnd:
freeDays += start - prevEnd - 1
prevEnd = max(prevEnd, end)
return freeDays + max(0, days - prevEnd)
| Solution |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 2302,
"end": 2569
} | class ____:
def a(self, /): # [no-self-use]
...
# Disable with old error code
# pylint: disable=use-symbolic-message-instead
def b(self, /): # pylint: disable=R0201
...
def func_a(self): # pylint: disable=unused-argument
pass
| C |
python | getsentry__sentry | src/sentry/search/events/builder/errors.py | {
"start": 6254,
"end": 7118
} | class ____(ErrorsQueryBuilderMixin, TopEventsQueryBuilder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def time_column(self) -> SelectType:
return Column("time", entity=Entity(self.dataset.value, alias=self.dataset.value))
def get_snql_query(self) -> Request:
return Request(
dataset=self.dataset.value,
app_id="errors",
query=Query(
match=self.match,
select=self.select,
where=self.where,
having=self.having,
groupby=self.groupby,
orderby=[OrderBy(self.time_column, Direction.ASC)],
granularity=self.granularity,
limit=self.limit,
),
tenant_ids=self.tenant_ids,
)
| ErrorsTopEventsQueryBuilder |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 21633,
"end": 25122
} | class ____(ScaleBase):
"""
A quasi-logarithmic scale based on the inverse hyperbolic sine (asinh)
For values close to zero, this is essentially a linear scale,
but for large magnitude values (either positive or negative)
it is asymptotically logarithmic. The transition between these
linear and logarithmic regimes is smooth, and has no discontinuities
in the function gradient in contrast to
the `.SymmetricalLogScale` ("symlog") scale.
Specifically, the transformation of an axis coordinate :math:`a` is
:math:`a \\rightarrow a_0 \\sinh^{-1} (a / a_0)` where :math:`a_0`
is the effective width of the linear region of the transformation.
In that region, the transformation is
:math:`a \\rightarrow a + \\mathcal{O}(a^3)`.
For large values of :math:`a` the transformation behaves as
:math:`a \\rightarrow a_0 \\, \\mathrm{sgn}(a) \\ln |a| + \\mathcal{O}(1)`.
.. note::
This API is provisional and may be revised in the future
based on early user feedback.
"""
name = 'asinh'
auto_tick_multipliers = {
3: (2, ),
4: (2, ),
5: (2, ),
8: (2, 4),
10: (2, 5),
16: (2, 4, 8),
64: (4, 16),
1024: (256, 512)
}
@_make_axis_parameter_optional
def __init__(self, axis=None, *, linear_width=1.0,
base=10, subs='auto', **kwargs):
"""
Parameters
----------
axis : `~matplotlib.axis.Axis`
The axis for the scale.
.. note::
This parameter is unused and about to be removed in the future.
It can already now be left out because of special preprocessing,
so that ``AsinhScale()`` is valid.
linear_width : float, default: 1
The scale parameter (elsewhere referred to as :math:`a_0`)
defining the extent of the quasi-linear region,
and the coordinate values beyond which the transformation
becomes asymptotically logarithmic.
base : int, default: 10
The number base used for rounding tick locations
on a logarithmic scale. If this is less than one,
then rounding is to the nearest integer multiple
of powers of ten.
subs : sequence of int
Multiples of the number base used for minor ticks.
If set to 'auto', this will use built-in defaults,
e.g. (2, 5) for base=10.
"""
super().__init__(axis)
self._transform = AsinhTransform(linear_width)
self._base = int(base)
if subs == 'auto':
self._subs = self.auto_tick_multipliers.get(self._base)
else:
self._subs = subs
linear_width = property(lambda self: self._transform.linear_width)
def get_transform(self):
return self._transform
def set_default_locators_and_formatters(self, axis):
axis.set(major_locator=AsinhLocator(self.linear_width,
base=self._base),
minor_locator=AsinhLocator(self.linear_width,
base=self._base,
subs=self._subs),
minor_formatter=NullFormatter())
if self._base > 1:
axis.set_major_formatter(LogFormatterSciNotation(self._base))
else:
axis.set_major_formatter('{x:.3g}')
| AsinhScale |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.